[openocd.git] / src / target / nds32.c
1 /***************************************************************************
2 * Copyright (C) 2013 Andes Technology *
3 * Hsiangkai Wang <hkwang@andestech.com> *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
17 ***************************************************************************/
18
19 #ifdef HAVE_CONFIG_H
20 #include "config.h"
21 #endif
22
23 #include <helper/log.h>
24 #include <helper/binarybuffer.h>
25 #include "nds32.h"
26 #include "nds32_aice.h"
27 #include "nds32_tlb.h"
28 #include "nds32_disassembler.h"
29
30 struct nds32_edm_operation nds32_edm_ops[NDS32_EDM_OPERATION_MAX_NUM];
31 uint32_t nds32_edm_ops_num;
32
33 const char *nds32_debug_type_name[11] = {
34 "SOFTWARE BREAK",
35 "SOFTWARE BREAK_16",
36 "HARDWARE BREAKPOINT",
37 "DATA ADDR WATCHPOINT PRECISE",
38 "DATA VALUE WATCHPOINT PRECISE",
39 "DATA VALUE WATCHPOINT IMPRECISE",
40 "DEBUG INTERRUPT",
41 "HARDWARE SINGLE STEP",
42 "DATA ADDR WATCHPOINT NEXT PRECISE",
43 "DATA VALUE WATCHPOINT NEXT PRECISE",
44 "LOAD STORE GLOBAL STOP",
45 };
46
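/* ILM/DLM size, in bytes, indexed by the 4-bit local-memory size field of
 * $mr6/$mr7 (see nds32_config()); encodings 9 and 10 select the small
 * 1 KB and 2 KB sizes, and the remaining encodings are presumably
 * reserved (they stay 0 here). */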
47 static const int nds32_lm_size_table[16] = {
48 4 * 1024,
49 8 * 1024,
50 16 * 1024,
51 32 * 1024,
52 64 * 1024,
53 128 * 1024,
54 256 * 1024,
55 512 * 1024,
56 1024 * 1024,
57 1 * 1024,
58 2 * 1024,
59 };
60
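/* Cache line size, in bytes, indexed by the 3-bit line-size field of
 * $cr1/$cr2: a field value n >= 1 means a line of 1 << (n + 2) bytes,
 * while 0 indicates no cache (see nds32_cache_sync()). */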
61 static const int nds32_line_size_table[6] = {
62 0,
63 8,
64 16,
65 32,
66 64,
67 128,
68 };
69
70 static int nds32_get_core_reg(struct reg *reg)
71 {
72 int retval;
73 struct nds32_reg *reg_arch_info = reg->arch_info;
74 struct target *target = reg_arch_info->target;
75 struct nds32 *nds32 = target_to_nds32(target);
76 struct aice_port_s *aice = target_to_aice(target);
77
78 if (target->state != TARGET_HALTED) {
79 LOG_ERROR("Target not halted");
80 return ERROR_TARGET_NOT_HALTED;
81 }
82
83 if (reg->valid) {
84 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
85 LOG_DEBUG("reading register(cached) %" PRIi32 "(%s), value: 0x%8.8" PRIx32,
86 reg_arch_info->num, reg->name, val);
87 return ERROR_OK;
88 }
89
90 int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);
91
92 if (reg_arch_info->enable == false) {
93 buf_set_u32(reg_arch_info->value, 0, 32, NDS32_REGISTER_DISABLE);
94 retval = ERROR_FAIL;
95 } else {
96 uint32_t val = 0;
97 if ((nds32->fpu_enable == false)
98 && (nds32_reg_type(mapped_regnum) == NDS32_REG_TYPE_FPU)) {
99 retval = ERROR_OK;
100 } else if ((nds32->audio_enable == false)
101 && (nds32_reg_type(mapped_regnum) == NDS32_REG_TYPE_AUMR)) {
102 retval = ERROR_OK;
103 } else {
104 retval = aice_read_register(aice, mapped_regnum, &val);
105 }
106 buf_set_u32(reg_arch_info->value, 0, 32, val);
107
108 LOG_DEBUG("reading register %" PRIi32 "(%s), value: 0x%8.8" PRIx32,
109 reg_arch_info->num, reg->name, val);
110 }
111
112 if (retval == ERROR_OK) {
113 reg->valid = true;
114 reg->dirty = false;
115 }
116
117 return retval;
118 }
119
120 static int nds32_get_core_reg_64(struct reg *reg)
121 {
122 int retval;
123 struct nds32_reg *reg_arch_info = reg->arch_info;
124 struct target *target = reg_arch_info->target;
125 struct nds32 *nds32 = target_to_nds32(target);
126 struct aice_port_s *aice = target_to_aice(target);
127
128 if (target->state != TARGET_HALTED) {
129 LOG_ERROR("Target not halted");
130 return ERROR_TARGET_NOT_HALTED;
131 }
132
133 if (reg->valid)
134 return ERROR_OK;
135
136 if (reg_arch_info->enable == false) {
137 buf_set_u64(reg_arch_info->value, 0, 64, NDS32_REGISTER_DISABLE);
138 retval = ERROR_FAIL;
139 } else {
140 uint64_t val = 0;
141 if ((nds32->fpu_enable == false)
142 && ((reg_arch_info->num >= FD0) && (reg_arch_info->num <= FD31))) {
143 retval = ERROR_OK;
144 } else {
145 retval = aice_read_reg_64(aice, reg_arch_info->num, &val);
146 }
147 buf_set_u64(reg_arch_info->value, 0, 64, val);
148 }
149
150 if (retval == ERROR_OK) {
151 reg->valid = true;
152 reg->dirty = false;
153 }
154
155 return retval;
156 }
157
158 static int nds32_update_psw(struct nds32 *nds32)
159 {
160 uint32_t value_ir0;
161 struct aice_port_s *aice = target_to_aice(nds32->target);
162
163 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
164
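/* $ir0 is the PSW; bit 5 (BE) selects the data endianness and bit 7 (DT)
 * enables data address translation. */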
165 /* Save data memory endian */
166 if ((value_ir0 >> 5) & 0x1) {
167 nds32->data_endian = TARGET_BIG_ENDIAN;
168 aice_set_data_endian(aice, AICE_BIG_ENDIAN);
169 } else {
170 nds32->data_endian = TARGET_LITTLE_ENDIAN;
171 aice_set_data_endian(aice, AICE_LITTLE_ENDIAN);
172 }
173
174 /* Save translation status */
175 nds32->memory.address_translation = ((value_ir0 >> 7) & 0x1) ? true : false;
176
177 return ERROR_OK;
178 }
179
180 static int nds32_update_mmu_info(struct nds32 *nds32)
181 {
182 uint32_t value;
183
184 /* Update MMU control status */
185 nds32_get_mapped_reg(nds32, MR0, &value);
186 nds32->mmu_config.default_min_page_size = value & 0x1;
187 nds32->mmu_config.multiple_page_size_in_use = (value >> 10) & 0x1;
188
189 return ERROR_OK;
190 }
191
192 static int nds32_update_cache_info(struct nds32 *nds32)
193 {
194 uint32_t value;
195
196 if (nds32_get_mapped_reg(nds32, MR8, &value) == ERROR_OK) {
197 if (value & 0x1)
198 nds32->memory.icache.enable = true;
199 else
200 nds32->memory.icache.enable = false;
201
202 if (value & 0x2)
203 nds32->memory.dcache.enable = true;
204 else
205 nds32->memory.dcache.enable = false;
206 } else {
207 nds32->memory.icache.enable = false;
208 nds32->memory.dcache.enable = false;
209 }
210
211 return ERROR_OK;
212 }
213
214 static int nds32_update_lm_info(struct nds32 *nds32)
215 {
216 struct nds32_memory *memory = &(nds32->memory);
217 uint32_t value_mr6;
218 uint32_t value_mr7;
219
220 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
221 if (value_mr6 & 0x1)
222 memory->ilm_enable = true;
223 else
224 memory->ilm_enable = false;
225
226 if (memory->ilm_align_ver == 0) { /* 1MB aligned */
227 memory->ilm_start = value_mr6 & 0xFFF00000;
228 memory->ilm_end = memory->ilm_start + memory->ilm_size;
229 } else if (memory->ilm_align_ver == 1) { /* aligned to local memory size */
230 memory->ilm_start = value_mr6 & 0xFFFFFC00;
231 memory->ilm_end = memory->ilm_start + memory->ilm_size;
232 } else {
233 memory->ilm_start = -1;
234 memory->ilm_end = -1;
235 }
236
237 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
238 if (value_mr7 & 0x1)
239 memory->dlm_enable = true;
240 else
241 memory->dlm_enable = false;
242
243 if (memory->dlm_align_ver == 0) { /* 1MB aligned */
244 memory->dlm_start = value_mr7 & 0xFFF00000;
245 memory->dlm_end = memory->dlm_start + memory->dlm_size;
246 } else if (memory->dlm_align_ver == 1) { /* aligned to local memory size */
247 memory->dlm_start = value_mr7 & 0xFFFFFC00;
248 memory->dlm_end = memory->dlm_start + memory->dlm_size;
249 } else {
250 memory->dlm_start = -1;
251 memory->dlm_end = -1;
252 }
253
254 return ERROR_OK;
255 }
256
257 /**
258  * Accessing FPU/audio registers while the FPU/audio extension is disabled
259  * causes an exception. So, we need to check whether FPU/audio is enabled
260  * whenever the target is halted. If the extension is disabled and a user
261  * accesses one of its registers, OpenOCD returns the fake value 0 instead
262  * of accessing the register through DIM.
263 */
264 static int nds32_check_extension(struct nds32 *nds32)
265 {
266 uint32_t value;
267
268 nds32_get_mapped_reg(nds32, FUCPR, &value);
269 if (value == NDS32_REGISTER_DISABLE) {
270 nds32->fpu_enable = false;
271 nds32->audio_enable = false;
272 return ERROR_OK;
273 }
274
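/* $fucpr bit 0 enables the FPU coprocessor, bit 31 the audio (DSP) extension */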
275 if (value & 0x1)
276 nds32->fpu_enable = true;
277 else
278 nds32->fpu_enable = false;
279
280 if (value & 0x80000000)
281 nds32->audio_enable = true;
282 else
283 nds32->audio_enable = false;
284
285 return ERROR_OK;
286 }
287
288 static int nds32_set_core_reg(struct reg *reg, uint8_t *buf)
289 {
290 struct nds32_reg *reg_arch_info = reg->arch_info;
291 struct target *target = reg_arch_info->target;
292 struct nds32 *nds32 = target_to_nds32(target);
293 struct aice_port_s *aice = target_to_aice(target);
294 uint32_t value = buf_get_u32(buf, 0, 32);
295
296 if (target->state != TARGET_HALTED) {
297 LOG_ERROR("Target not halted");
298 return ERROR_TARGET_NOT_HALTED;
299 }
300
301 int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);
302
303 /* ignore values that will generate an exception */
304 if (nds32_reg_exception(mapped_regnum, value))
305 return ERROR_OK;
306
307 LOG_DEBUG("writing register %" PRIi32 "(%s) with value 0x%8.8" PRIx32,
308 reg_arch_info->num, reg->name, value);
309
310 if ((nds32->fpu_enable == false) &&
311 (nds32_reg_type(mapped_regnum) == NDS32_REG_TYPE_FPU)) {
312
313 buf_set_u32(reg->value, 0, 32, 0);
314 } else if ((nds32->audio_enable == false) &&
315 (nds32_reg_type(mapped_regnum) == NDS32_REG_TYPE_AUMR)) {
316
317 buf_set_u32(reg->value, 0, 32, 0);
318 } else {
319 buf_set_u32(reg->value, 0, 32, value);
320 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
321 aice_write_register(aice, mapped_regnum, val);
322
323 /* After writing a register, read the value back from the target
324  * to avoid W1C (write-1-to-clear) inconsistencies. */
325 aice_read_register(aice, mapped_regnum, &val);
326 buf_set_u32(reg_arch_info->value, 0, 32, val);
327 }
328
329 reg->valid = true;
330 reg->dirty = false;
331
332 /* update cached target state so the new value takes effect right now */
333 if (mapped_regnum == IR0) {
334 nds32_update_psw(nds32);
335 } else if (mapped_regnum == MR0) {
336 nds32_update_mmu_info(nds32);
337 } else if ((mapped_regnum == MR6) || (mapped_regnum == MR7)) {
338 /* update lm information */
339 nds32_update_lm_info(nds32);
340 } else if (mapped_regnum == MR8) {
341 nds32_update_cache_info(nds32);
342 } else if (mapped_regnum == FUCPR) {
343 /* update audio/fpu setting */
344 nds32_check_extension(nds32);
345 }
346
347 return ERROR_OK;
348 }
349
350 static int nds32_set_core_reg_64(struct reg *reg, uint8_t *buf)
351 {
352 struct nds32_reg *reg_arch_info = reg->arch_info;
353 struct target *target = reg_arch_info->target;
354 struct nds32 *nds32 = target_to_nds32(target);
355 uint32_t low_part = buf_get_u32(buf, 0, 32);
356 uint32_t high_part = buf_get_u32(buf, 32, 32);
357
358 if (target->state != TARGET_HALTED) {
359 LOG_ERROR("Target not halted");
360 return ERROR_TARGET_NOT_HALTED;
361 }
362
363 if ((nds32->fpu_enable == false) &&
364 ((reg_arch_info->num >= FD0) && (reg_arch_info->num <= FD31))) {
365
366 buf_set_u32(reg->value, 0, 32, 0);
367 buf_set_u32(reg->value, 32, 32, 0);
368
369 reg->valid = true;
370 reg->dirty = false;
371 } else {
372 buf_set_u32(reg->value, 0, 32, low_part);
373 buf_set_u32(reg->value, 32, 32, high_part);
374
375 reg->valid = true;
376 reg->dirty = true;
377 }
378
379 return ERROR_OK;
380 }
381
382 static const struct reg_arch_type nds32_reg_access_type = {
383 .get = nds32_get_core_reg,
384 .set = nds32_set_core_reg,
385 };
386
387 static const struct reg_arch_type nds32_reg_access_type_64 = {
388 .get = nds32_get_core_reg_64,
389 .set = nds32_set_core_reg_64,
390 };
391
392 static struct reg_cache *nds32_build_reg_cache(struct target *target,
393 struct nds32 *nds32)
394 {
395 struct reg_cache *cache = calloc(sizeof(struct reg_cache), 1);
396 struct reg *reg_list = calloc(TOTAL_REG_NUM, sizeof(struct reg));
397 struct nds32_reg *reg_arch_info = calloc(TOTAL_REG_NUM, sizeof(struct nds32_reg));
398 int i;
399
400 if (!cache || !reg_list || !reg_arch_info) {
401 free(cache);
402 free(reg_list);
403 free(reg_arch_info);
404 return NULL;
405 }
406
407 cache->name = "Andes registers";
408 cache->next = NULL;
409 cache->reg_list = reg_list;
410 cache->num_regs = 0;
411
412 for (i = 0; i < TOTAL_REG_NUM; i++) {
413 reg_arch_info[i].num = i;
414 reg_arch_info[i].target = target;
415 reg_arch_info[i].nds32 = nds32;
416 reg_arch_info[i].enable = false;
417
418 reg_list[i].name = nds32_reg_simple_name(i);
419 reg_list[i].number = reg_arch_info[i].num;
420 reg_list[i].size = nds32_reg_size(i);
421 reg_list[i].arch_info = &reg_arch_info[i];
422
423 reg_list[i].reg_data_type = calloc(sizeof(struct reg_data_type), 1);
424
425 if (reg_arch_info[i].num >= FD0 && reg_arch_info[i].num <= FD31) {
426 reg_list[i].value = reg_arch_info[i].value;
427 reg_list[i].type = &nds32_reg_access_type_64;
428
429 reg_list[i].reg_data_type->type = REG_TYPE_IEEE_DOUBLE;
430 reg_list[i].reg_data_type->id = "ieee_double";
431 reg_list[i].group = "float";
432 } else {
433 reg_list[i].value = reg_arch_info[i].value;
434 reg_list[i].type = &nds32_reg_access_type;
435 reg_list[i].group = "general";
436
437 if ((reg_arch_info[i].num >= FS0) && (reg_arch_info[i].num <= FS31)) {
438 reg_list[i].reg_data_type->type = REG_TYPE_IEEE_SINGLE;
439 reg_list[i].reg_data_type->id = "ieee_single";
440 reg_list[i].group = "float";
441 } else if ((reg_arch_info[i].num == FPCSR) ||
442 (reg_arch_info[i].num == FPCFG)) {
443 reg_list[i].group = "float";
444 } else if ((reg_arch_info[i].num == R28) ||
445 (reg_arch_info[i].num == R29) ||
446 (reg_arch_info[i].num == R31)) {
447 reg_list[i].reg_data_type->type = REG_TYPE_DATA_PTR;
448 reg_list[i].reg_data_type->id = "data_ptr";
449 } else if ((reg_arch_info[i].num == R30) ||
450 (reg_arch_info[i].num == PC)) {
451 reg_list[i].reg_data_type->type = REG_TYPE_CODE_PTR;
452 reg_list[i].reg_data_type->id = "code_ptr";
453 } else {
454 reg_list[i].reg_data_type->type = REG_TYPE_UINT32;
455 reg_list[i].reg_data_type->id = "uint32";
456 }
457 }
458
459 if (reg_arch_info[i].num >= R16 && reg_arch_info[i].num <= R25)
460 reg_list[i].caller_save = true;
461 else
462 reg_list[i].caller_save = false;
463
464 reg_list[i].feature = malloc(sizeof(struct reg_feature));
465
466 if (reg_arch_info[i].num >= R0 && reg_arch_info[i].num <= IFC_LP)
467 reg_list[i].feature->name = "org.gnu.gdb.nds32.core";
468 else if (reg_arch_info[i].num >= CR0 && reg_arch_info[i].num <= SECUR0)
469 reg_list[i].feature->name = "org.gnu.gdb.nds32.system";
470 else if (reg_arch_info[i].num >= D0L24 && reg_arch_info[i].num <= CBE3)
471 reg_list[i].feature->name = "org.gnu.gdb.nds32.audio";
472 else if (reg_arch_info[i].num >= FPCSR && reg_arch_info[i].num <= FD31)
473 reg_list[i].feature->name = "org.gnu.gdb.nds32.fpu";
474
475 cache->num_regs++;
476 }
477
478 nds32->core_cache = cache;
479
480 return cache;
481 }
482
483 static int nds32_reg_cache_init(struct target *target, struct nds32 *nds32)
484 {
485 struct reg_cache *cache;
486
487 cache = nds32_build_reg_cache(target, nds32);
488 if (!cache)
489 return ERROR_FAIL;
490
491 *register_get_last_cache_p(&target->reg_cache) = cache;
492
493 return ERROR_OK;
494 }
495
496 static struct reg *nds32_reg_current(struct nds32 *nds32, unsigned regnum)
497 {
498 struct reg *r;
499
500 r = nds32->core_cache->reg_list + regnum;
501
502 return r;
503 }
504
505 int nds32_full_context(struct nds32 *nds32)
506 {
507 uint32_t value, value_ir0;
508
509 /* save $pc & $psw */
510 nds32_get_mapped_reg(nds32, PC, &value);
511 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
512
513 nds32_update_psw(nds32);
514 nds32_update_mmu_info(nds32);
515 nds32_update_cache_info(nds32);
516 nds32_update_lm_info(nds32);
517
518 nds32_check_extension(nds32);
519
520 return ERROR_OK;
521 }
522
523 /* get register value internally */
524 int nds32_get_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t *value)
525 {
526 struct reg_cache *reg_cache = nds32->core_cache;
527 struct reg *r;
528
529 if (regnum > reg_cache->num_regs)
530 return ERROR_FAIL;
531
532 r = nds32_reg_current(nds32, regnum);
533
534 if (r->type->get(r) != ERROR_OK)
535 return ERROR_FAIL;
536
537 *value = buf_get_u32(r->value, 0, 32);
538
539 return ERROR_OK;
540 }
541
542 /** set register internally */
543 int nds32_set_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t value)
544 {
545 struct reg_cache *reg_cache = nds32->core_cache;
546 struct reg *r;
547 uint8_t set_value[4];
548
549 if (regnum > reg_cache->num_regs)
550 return ERROR_FAIL;
551
552 r = nds32_reg_current(nds32, regnum);
553
554 buf_set_u32(set_value, 0, 32, value);
555
556 return r->type->set(r, set_value);
557 }
558
559 /** get general register list */
560 static int nds32_get_general_reg_list(struct nds32 *nds32,
561 struct reg **reg_list[], int *reg_list_size)
562 {
563 struct reg *reg_current;
564 int i;
565 int current_idx;
566
567 /** freed in gdb_server.c */
568 *reg_list = malloc(sizeof(struct reg *) * (IFC_LP - R0 + 1));
569 current_idx = 0;
570
571 for (i = R0; i < IFC_LP + 1; i++) {
572 reg_current = nds32_reg_current(nds32, i);
573 if (((struct nds32_reg *)reg_current->arch_info)->enable) {
574 (*reg_list)[current_idx] = reg_current;
575 current_idx++;
576 }
577 }
578 *reg_list_size = current_idx;
579
580 return ERROR_OK;
581 }
582
583 /** get all register list */
584 static int nds32_get_all_reg_list(struct nds32 *nds32,
585 struct reg **reg_list[], int *reg_list_size)
586 {
587 struct reg_cache *reg_cache = nds32->core_cache;
588 struct reg *reg_current;
589 unsigned int i;
590
591 *reg_list_size = reg_cache->num_regs;
592
593 /** freed in gdb_server.c */
594 *reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));
595
596 for (i = 0; i < reg_cache->num_regs; i++) {
597 reg_current = nds32_reg_current(nds32, i);
598 reg_current->exist = ((struct nds32_reg *)
599 reg_current->arch_info)->enable;
600 (*reg_list)[i] = reg_current;
601 }
602
603 return ERROR_OK;
604 }
605
606 /** get all register list */
607 int nds32_get_gdb_reg_list(struct target *target,
608 struct reg **reg_list[], int *reg_list_size,
609 enum target_register_class reg_class)
610 {
611 struct nds32 *nds32 = target_to_nds32(target);
612
613 switch (reg_class) {
614 case REG_CLASS_ALL:
615 return nds32_get_all_reg_list(nds32, reg_list, reg_list_size);
616 case REG_CLASS_GENERAL:
617 return nds32_get_general_reg_list(nds32, reg_list, reg_list_size);
618 default:
619 return ERROR_FAIL;
620 }
621
622 return ERROR_FAIL;
623 }
624
625 static int nds32_select_memory_mode(struct target *target, uint32_t address,
626 uint32_t length, uint32_t *end_address)
627 {
628 struct nds32 *nds32 = target_to_nds32(target);
629 struct aice_port_s *aice = target_to_aice(target);
630 struct nds32_memory *memory = &(nds32->memory);
631 struct nds32_edm *edm = &(nds32->edm);
632 uint32_t dlm_start, dlm_end;
633 uint32_t ilm_start, ilm_end;
634 uint32_t address_end = address + length;
635
636 /* init end_address */
637 *end_address = address_end;
638
639 if (memory->access_channel == NDS_MEMORY_ACC_CPU)
640 return ERROR_OK;
641
642 if (edm->access_control == false) {
643 LOG_DEBUG("EDM does not support ACC_CTL");
644 return ERROR_OK;
645 }
646
647 if (edm->direct_access_local_memory == false) {
648 LOG_DEBUG("EDM does not support DALM");
649 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
650 return ERROR_OK;
651 }
652
653 if (memory->mode != NDS_MEMORY_SELECT_AUTO) {
654 LOG_DEBUG("Memory mode is not AUTO");
655 return ERROR_OK;
656 }
657
658 /* set default mode */
659 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
660
661 if ((memory->ilm_base != 0) && (memory->ilm_enable == true)) {
662 ilm_start = memory->ilm_start;
663 ilm_end = memory->ilm_end;
664
665 /* case 1, address < ilm_start */
666 if (address < ilm_start) {
667 if (ilm_start < address_end) {
668 /* update end_address to split non-ILM from ILM */
669 *end_address = ilm_start;
670 }
671 /* MEM mode */
672 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
673 } else if ((ilm_start <= address) && (address < ilm_end)) {
674 /* case 2, ilm_start <= address < ilm_end */
675 if (ilm_end < address_end) {
676 /* update end_address to split non-ILM from ILM */
677 *end_address = ilm_end;
678 }
679 /* ILM mode */
680 aice_memory_mode(aice, NDS_MEMORY_SELECT_ILM);
681 } else { /* case 3, ilm_end <= address */
682 /* MEM mode */
683 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
684 }
685
686 return ERROR_OK;
687 } else {
688 LOG_DEBUG("ILM is not enabled");
689 }
690
691 if ((memory->dlm_base != 0) && (memory->dlm_enable == true)) {
692 dlm_start = memory->dlm_start;
693 dlm_end = memory->dlm_end;
694
695 /* case 1, address < dlm_start */
696 if (address < dlm_start) {
697 if (dlm_start < address_end) {
698 /* update end_address to split non-DLM from DLM */
699 *end_address = dlm_start;
700 }
701 /* MEM mode */
702 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
703 } else if ((dlm_start <= address) && (address < dlm_end)) {
704 /* case 2, dlm_start <= address < dlm_end */
705 if (dlm_end < address_end) {
706 /* update end_address to split non-DLM from DLM */
707 *end_address = dlm_end;
708 }
709 /* DLM mode */
710 aice_memory_mode(aice, NDS_MEMORY_SELECT_DLM);
711 } else { /* case 3, dlm_end <= address */
712 /* MEM mode */
713 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
714 }
715
716 return ERROR_OK;
717 } else {
718 LOG_DEBUG("DLM is not enabled");
719 }
720
721 return ERROR_OK;
722 }
723
724 int nds32_read_buffer(struct target *target, uint32_t address,
725 uint32_t size, uint8_t *buffer)
726 {
727 struct nds32 *nds32 = target_to_nds32(target);
728 struct nds32_memory *memory = &(nds32->memory);
729
730 if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
731 (target->state != TARGET_HALTED)) {
732 LOG_WARNING("target was not halted");
733 return ERROR_TARGET_NOT_HALTED;
734 }
735
736 LOG_DEBUG("READ BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
737 address,
738 size);
739
740 int retval = ERROR_OK;
741 struct aice_port_s *aice = target_to_aice(target);
742 uint32_t end_address;
743
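/* fast path: an aligned 2-byte request is issued as a single halfword access */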
744 if (((address % 2) == 0) && (size == 2)) {
745 nds32_select_memory_mode(target, address, 2, &end_address);
746 return aice_read_mem_unit(aice, address, 2, 1, buffer);
747 }
748
749 /* handle unaligned head bytes */
750 if (address % 4) {
751 uint32_t unaligned = 4 - (address % 4);
752
753 if (unaligned > size)
754 unaligned = size;
755
756 nds32_select_memory_mode(target, address, unaligned, &end_address);
757 retval = aice_read_mem_unit(aice, address, 1, unaligned, buffer);
758 if (retval != ERROR_OK)
759 return retval;
760
761 buffer += unaligned;
762 address += unaligned;
763 size -= unaligned;
764 }
765
766 /* handle aligned words */
767 if (size >= 4) {
768 int aligned = size - (size % 4);
769 int read_len;
770
771 do {
772 nds32_select_memory_mode(target, address, aligned, &end_address);
773
774 read_len = end_address - address;
775
776 if (read_len > 8)
777 retval = aice_read_mem_bulk(aice, address, read_len, buffer);
778 else
779 retval = aice_read_mem_unit(aice, address, 4, read_len / 4, buffer);
780
781 if (retval != ERROR_OK)
782 return retval;
783
784 buffer += read_len;
785 address += read_len;
786 size -= read_len;
787 aligned -= read_len;
788
789 } while (aligned != 0);
790 }
791
792 /* prevent byte access when possible (avoid AHB access limitations in some cases) */
793 if (size >= 2) {
794 int aligned = size - (size % 2);
795 nds32_select_memory_mode(target, address, aligned, &end_address);
796 retval = aice_read_mem_unit(aice, address, 2, aligned / 2, buffer);
797 if (retval != ERROR_OK)
798 return retval;
799
800 buffer += aligned;
801 address += aligned;
802 size -= aligned;
803 }
804 /* handle tail reads of less than 4 bytes */
805 if (size > 0) {
806 nds32_select_memory_mode(target, address, size, &end_address);
807 retval = aice_read_mem_unit(aice, address, 1, size, buffer);
808 if (retval != ERROR_OK)
809 return retval;
810 }
811
812 return ERROR_OK;
813 }
814
815 int nds32_read_memory(struct target *target, uint32_t address,
816 uint32_t size, uint32_t count, uint8_t *buffer)
817 {
818 struct aice_port_s *aice = target_to_aice(target);
819
820 return aice_read_mem_unit(aice, address, size, count, buffer);
821 }
822
823 int nds32_read_phys_memory(struct target *target, target_addr_t address,
824 uint32_t size, uint32_t count, uint8_t *buffer)
825 {
826 struct aice_port_s *aice = target_to_aice(target);
827 struct nds32 *nds32 = target_to_nds32(target);
828 struct nds32_memory *memory = &(nds32->memory);
829 enum nds_memory_access orig_channel;
830 int result;
831
832 /* switch to BUS access mode to skip MMU */
833 orig_channel = memory->access_channel;
834 memory->access_channel = NDS_MEMORY_ACC_BUS;
835 aice_memory_access(aice, memory->access_channel);
836
837 /* The input address is a physical address; no address translation is needed. */
838 result = aice_read_mem_unit(aice, address, size, count, buffer);
839
840 /* restore the original access mode */
841 memory->access_channel = orig_channel;
842 aice_memory_access(aice, memory->access_channel);
843
844 return result;
845 }
846
847 int nds32_write_buffer(struct target *target, uint32_t address,
848 uint32_t size, const uint8_t *buffer)
849 {
850 struct nds32 *nds32 = target_to_nds32(target);
851 struct nds32_memory *memory = &(nds32->memory);
852
853 if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
854 (target->state != TARGET_HALTED)) {
855 LOG_WARNING("target was not halted");
856 return ERROR_TARGET_NOT_HALTED;
857 }
858
859 LOG_DEBUG("WRITE BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
860 address,
861 size);
862
863 struct aice_port_s *aice = target_to_aice(target);
864 int retval = ERROR_OK;
865 uint32_t end_address;
866
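/* fast path: an aligned 2-byte request is issued as a single halfword access */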
867 if (((address % 2) == 0) && (size == 2)) {
868 nds32_select_memory_mode(target, address, 2, &end_address);
869 return aice_write_mem_unit(aice, address, 2, 1, buffer);
870 }
871
872 /* handle unaligned head bytes */
873 if (address % 4) {
874 uint32_t unaligned = 4 - (address % 4);
875
876 if (unaligned > size)
877 unaligned = size;
878
879 nds32_select_memory_mode(target, address, unaligned, &end_address);
880 retval = aice_write_mem_unit(aice, address, 1, unaligned, buffer);
881 if (retval != ERROR_OK)
882 return retval;
883
884 buffer += unaligned;
885 address += unaligned;
886 size -= unaligned;
887 }
888
889 /* handle aligned words */
890 if (size >= 4) {
891 int aligned = size - (size % 4);
892 int write_len;
893
894 do {
895 nds32_select_memory_mode(target, address, aligned, &end_address);
896
897 write_len = end_address - address;
898 if (write_len > 8)
899 retval = aice_write_mem_bulk(aice, address, write_len, buffer);
900 else
901 retval = aice_write_mem_unit(aice, address, 4, write_len / 4, buffer);
902 if (retval != ERROR_OK)
903 return retval;
904
905 buffer += write_len;
906 address += write_len;
907 size -= write_len;
908 aligned -= write_len;
909
910 } while (aligned != 0);
911 }
912
913 /* handle tail writes of less than 4 bytes */
914 if (size > 0) {
915 nds32_select_memory_mode(target, address, size, &end_address);
916 retval = aice_write_mem_unit(aice, address, 1, size, buffer);
917 if (retval != ERROR_OK)
918 return retval;
919 }
920
921 return retval;
922 }
923
924 int nds32_write_memory(struct target *target, uint32_t address,
925 uint32_t size, uint32_t count, const uint8_t *buffer)
926 {
927 struct aice_port_s *aice = target_to_aice(target);
928
929 return aice_write_mem_unit(aice, address, size, count, buffer);
930 }
931
932 int nds32_write_phys_memory(struct target *target, target_addr_t address,
933 uint32_t size, uint32_t count, const uint8_t *buffer)
934 {
935 struct aice_port_s *aice = target_to_aice(target);
936 struct nds32 *nds32 = target_to_nds32(target);
937 struct nds32_memory *memory = &(nds32->memory);
938 enum nds_memory_access orig_channel;
939 int result;
940
941 /* switch to BUS access mode to skip MMU */
942 orig_channel = memory->access_channel;
943 memory->access_channel = NDS_MEMORY_ACC_BUS;
944 aice_memory_access(aice, memory->access_channel);
945
946 /* The input address is a physical address; no address translation is needed. */
947 result = aice_write_mem_unit(aice, address, size, count, buffer);
948
949 /* restore the original access mode */
950 memory->access_channel = orig_channel;
951 aice_memory_access(aice, memory->access_channel);
952
953 return result;
954 }
955
956 int nds32_mmu(struct target *target, int *enabled)
957 {
958 if (target->state != TARGET_HALTED) {
959 LOG_ERROR("%s: target not halted", __func__);
960 return ERROR_TARGET_INVALID;
961 }
962
963 struct nds32 *nds32 = target_to_nds32(target);
964 struct nds32_memory *memory = &(nds32->memory);
965 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
966
967 if ((mmu_config->memory_protection == 2) && (memory->address_translation == true))
968 *enabled = 1;
969 else
970 *enabled = 0;
971
972 return ERROR_OK;
973 }
974
975 int nds32_arch_state(struct target *target)
976 {
977 struct nds32 *nds32 = target_to_nds32(target);
978
979 if (nds32->common_magic != NDS32_COMMON_MAGIC) {
980 LOG_ERROR("BUG: called for a non-Andes target");
981 return ERROR_FAIL;
982 }
983
984 uint32_t value_pc, value_psw;
985
986 nds32_get_mapped_reg(nds32, PC, &value_pc);
987 nds32_get_mapped_reg(nds32, IR0, &value_psw);
988
989 LOG_USER("target halted due to %s\n"
990 "psw: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
991 debug_reason_name(target),
992 value_psw,
993 value_pc,
994 nds32->virtual_hosting ? ", virtual hosting" : "");
995
996 /* save pc value to pseudo register pc */
997 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
998 buf_set_u32(reg->value, 0, 32, value_pc);
999
1000 return ERROR_OK;
1001 }
1002
1003 static void nds32_init_must_have_registers(struct nds32 *nds32)
1004 {
1005 struct reg_cache *reg_cache = nds32->core_cache;
1006
1007 /** MUST have general registers */
1008 ((struct nds32_reg *)reg_cache->reg_list[R0].arch_info)->enable = true;
1009 ((struct nds32_reg *)reg_cache->reg_list[R1].arch_info)->enable = true;
1010 ((struct nds32_reg *)reg_cache->reg_list[R2].arch_info)->enable = true;
1011 ((struct nds32_reg *)reg_cache->reg_list[R3].arch_info)->enable = true;
1012 ((struct nds32_reg *)reg_cache->reg_list[R4].arch_info)->enable = true;
1013 ((struct nds32_reg *)reg_cache->reg_list[R5].arch_info)->enable = true;
1014 ((struct nds32_reg *)reg_cache->reg_list[R6].arch_info)->enable = true;
1015 ((struct nds32_reg *)reg_cache->reg_list[R7].arch_info)->enable = true;
1016 ((struct nds32_reg *)reg_cache->reg_list[R8].arch_info)->enable = true;
1017 ((struct nds32_reg *)reg_cache->reg_list[R9].arch_info)->enable = true;
1018 ((struct nds32_reg *)reg_cache->reg_list[R10].arch_info)->enable = true;
1019 ((struct nds32_reg *)reg_cache->reg_list[R15].arch_info)->enable = true;
1020 ((struct nds32_reg *)reg_cache->reg_list[R28].arch_info)->enable = true;
1021 ((struct nds32_reg *)reg_cache->reg_list[R29].arch_info)->enable = true;
1022 ((struct nds32_reg *)reg_cache->reg_list[R30].arch_info)->enable = true;
1023 ((struct nds32_reg *)reg_cache->reg_list[R31].arch_info)->enable = true;
1024 ((struct nds32_reg *)reg_cache->reg_list[PC].arch_info)->enable = true;
1025
1026 /** MUST have configuration system registers */
1027 ((struct nds32_reg *)reg_cache->reg_list[CR0].arch_info)->enable = true;
1028 ((struct nds32_reg *)reg_cache->reg_list[CR1].arch_info)->enable = true;
1029 ((struct nds32_reg *)reg_cache->reg_list[CR2].arch_info)->enable = true;
1030 ((struct nds32_reg *)reg_cache->reg_list[CR3].arch_info)->enable = true;
1031 ((struct nds32_reg *)reg_cache->reg_list[CR4].arch_info)->enable = true;
1032
1033 /** MUST have interrupt system registers */
1034 ((struct nds32_reg *)reg_cache->reg_list[IR0].arch_info)->enable = true;
1035 ((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = true;
1036 ((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = true;
1037 ((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = true;
1038 ((struct nds32_reg *)reg_cache->reg_list[IR6].arch_info)->enable = true;
1039 ((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = true;
1040 ((struct nds32_reg *)reg_cache->reg_list[IR11].arch_info)->enable = true;
1041 ((struct nds32_reg *)reg_cache->reg_list[IR14].arch_info)->enable = true;
1042 ((struct nds32_reg *)reg_cache->reg_list[IR15].arch_info)->enable = true;
1043
1044 /** MUST have MMU system registers */
1045 ((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = true;
1046
1047 /** MUST have EDM system registers */
1048 ((struct nds32_reg *)reg_cache->reg_list[DR40].arch_info)->enable = true;
1049 ((struct nds32_reg *)reg_cache->reg_list[DR42].arch_info)->enable = true;
1050 }
1051
1052 static int nds32_init_memory_config(struct nds32 *nds32)
1053 {
1054 uint32_t value_cr1; /* ICM_CFG */
1055 uint32_t value_cr2; /* DCM_CFG */
1056 struct nds32_memory *memory = &(nds32->memory);
1057
1058 /* read $cr1 to init instruction memory information */
1059 nds32_get_mapped_reg(nds32, CR1, &value_cr1);
1060 memory->icache.set = value_cr1 & 0x7;
1061 memory->icache.way = (value_cr1 >> 3) & 0x7;
1062 memory->icache.line_size = (value_cr1 >> 6) & 0x7;
1063 memory->icache.lock_support = (value_cr1 >> 9) & 0x1;
1064
1065 memory->ilm_base = (value_cr1 >> 10) & 0x7;
1066 memory->ilm_align_ver = (value_cr1 >> 13) & 0x3;
1067
1068 /* read $cr2 to init data memory information */
1069 nds32_get_mapped_reg(nds32, CR2, &value_cr2);
1070 memory->dcache.set = value_cr2 & 0x7;
1071 memory->dcache.way = (value_cr2 >> 3) & 0x7;
1072 memory->dcache.line_size = (value_cr2 >> 6) & 0x7;
1073 memory->dcache.lock_support = (value_cr2 >> 9) & 0x1;
1074
1075 memory->dlm_base = (value_cr2 >> 10) & 0x7;
1076 memory->dlm_align_ver = (value_cr2 >> 13) & 0x3;
1077
1078 return ERROR_OK;
1079 }
1080
1081 static void nds32_init_config(struct nds32 *nds32)
1082 {
1083 uint32_t value_cr0;
1084 uint32_t value_cr3;
1085 uint32_t value_cr4;
1086 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
1087 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
1088 struct nds32_misc_config *misc_config = &(nds32->misc_config);
1089
1090 nds32_get_mapped_reg(nds32, CR0, &value_cr0);
1091 nds32_get_mapped_reg(nds32, CR3, &value_cr3);
1092 nds32_get_mapped_reg(nds32, CR4, &value_cr4);
1093
1094 /* config cpu version */
1095 cpu_version->performance_extension = value_cr0 & 0x1;
1096 cpu_version->_16bit_extension = (value_cr0 >> 1) & 0x1;
1097 cpu_version->performance_extension_2 = (value_cr0 >> 2) & 0x1;
1098 cpu_version->cop_fpu_extension = (value_cr0 >> 3) & 0x1;
1099 cpu_version->string_extension = (value_cr0 >> 4) & 0x1;
1100 cpu_version->revision = (value_cr0 >> 16) & 0xFF;
1101 cpu_version->cpu_id_family = (value_cr0 >> 24) & 0xF;
1102 cpu_version->cpu_id_version = (value_cr0 >> 28) & 0xF;
1103
1104 /* config MMU */
1105 mmu_config->memory_protection = value_cr3 & 0x3;
1106 mmu_config->memory_protection_version = (value_cr3 >> 2) & 0x1F;
1107 mmu_config->fully_associative_tlb = (value_cr3 >> 7) & 0x1;
1108 if (mmu_config->fully_associative_tlb) {
1109 mmu_config->tlb_size = (value_cr3 >> 8) & 0x7F;
1110 } else {
1111 mmu_config->tlb_ways = (value_cr3 >> 8) & 0x7;
1112 mmu_config->tlb_sets = (value_cr3 >> 11) & 0x7;
1113 }
1114 mmu_config->_8k_page_support = (value_cr3 >> 15) & 0x1;
1115 mmu_config->extra_page_size_support = (value_cr3 >> 16) & 0xFF;
1116 mmu_config->tlb_lock = (value_cr3 >> 24) & 0x1;
1117 mmu_config->hardware_page_table_walker = (value_cr3 >> 25) & 0x1;
1118 mmu_config->default_endian = (value_cr3 >> 26) & 0x1;
1119 mmu_config->partition_num = (value_cr3 >> 27) & 0x1;
1120 mmu_config->invisible_tlb = (value_cr3 >> 28) & 0x1;
1121 mmu_config->vlpt = (value_cr3 >> 29) & 0x1;
1122 mmu_config->ntme = (value_cr3 >> 30) & 0x1;
1123 mmu_config->drde = (value_cr3 >> 31) & 0x1;
1124
1125 /* config misc */
1126 misc_config->edm = value_cr4 & 0x1;
1127 misc_config->local_memory_dma = (value_cr4 >> 1) & 0x1;
1128 misc_config->performance_monitor = (value_cr4 >> 2) & 0x1;
1129 misc_config->high_speed_memory_port = (value_cr4 >> 3) & 0x1;
1130 misc_config->debug_tracer = (value_cr4 >> 4) & 0x1;
1131 misc_config->div_instruction = (value_cr4 >> 5) & 0x1;
1132 misc_config->mac_instruction = (value_cr4 >> 6) & 0x1;
1133 misc_config->audio_isa = (value_cr4 >> 7) & 0x3;
1134 misc_config->l2_cache = (value_cr4 >> 9) & 0x1;
1135 misc_config->reduce_register = (value_cr4 >> 10) & 0x1;
1136 misc_config->addr_24 = (value_cr4 >> 11) & 0x1;
1137 misc_config->interruption_level = (value_cr4 >> 12) & 0x1;
1138 misc_config->baseline_instruction = (value_cr4 >> 13) & 0x7;
1139 misc_config->no_dx_register = (value_cr4 >> 16) & 0x1;
1140 misc_config->implement_dependant_register = (value_cr4 >> 17) & 0x1;
1141 misc_config->implement_dependant_sr_encoding = (value_cr4 >> 18) & 0x1;
1142 misc_config->ifc = (value_cr4 >> 19) & 0x1;
1143 misc_config->mcu = (value_cr4 >> 20) & 0x1;
1144 misc_config->shadow = (value_cr4 >> 21) & 0x7;
1145 misc_config->ex9 = (value_cr4 >> 24) & 0x1;
1146
1147 nds32_init_memory_config(nds32);
1148 }
1149
1150 static int nds32_init_option_registers(struct nds32 *nds32)
1151 {
1152 struct reg_cache *reg_cache = nds32->core_cache;
1153 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
1154 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
1155 struct nds32_misc_config *misc_config = &(nds32->misc_config);
1156 struct nds32_memory *memory_config = &(nds32->memory);
1157
1158 bool no_cr5;
1159 bool mr10_exist;
1160 bool no_racr0;
1161
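/* early revisions of CPU families 0xC and 0xD implement $mr10 but provide
 * neither $cr5 nor $racr0 */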
1162 if (((cpu_version->cpu_id_family == 0xC) || (cpu_version->cpu_id_family == 0xD)) &&
1163 ((cpu_version->revision & 0xFC) == 0)) {
1164 no_cr5 = true;
1165 mr10_exist = true;
1166 no_racr0 = true;
1167 } else {
1168 no_cr5 = false;
1169 mr10_exist = false;
1170 no_racr0 = false;
1171 }
1172
1173 if (misc_config->reduce_register == false) {
1174 ((struct nds32_reg *)reg_cache->reg_list[R11].arch_info)->enable = true;
1175 ((struct nds32_reg *)reg_cache->reg_list[R12].arch_info)->enable = true;
1176 ((struct nds32_reg *)reg_cache->reg_list[R13].arch_info)->enable = true;
1177 ((struct nds32_reg *)reg_cache->reg_list[R14].arch_info)->enable = true;
1178 ((struct nds32_reg *)reg_cache->reg_list[R16].arch_info)->enable = true;
1179 ((struct nds32_reg *)reg_cache->reg_list[R17].arch_info)->enable = true;
1180 ((struct nds32_reg *)reg_cache->reg_list[R18].arch_info)->enable = true;
1181 ((struct nds32_reg *)reg_cache->reg_list[R19].arch_info)->enable = true;
1182 ((struct nds32_reg *)reg_cache->reg_list[R20].arch_info)->enable = true;
1183 ((struct nds32_reg *)reg_cache->reg_list[R21].arch_info)->enable = true;
1184 ((struct nds32_reg *)reg_cache->reg_list[R22].arch_info)->enable = true;
1185 ((struct nds32_reg *)reg_cache->reg_list[R23].arch_info)->enable = true;
1186 ((struct nds32_reg *)reg_cache->reg_list[R24].arch_info)->enable = true;
1187 ((struct nds32_reg *)reg_cache->reg_list[R25].arch_info)->enable = true;
1188 ((struct nds32_reg *)reg_cache->reg_list[R26].arch_info)->enable = true;
1189 ((struct nds32_reg *)reg_cache->reg_list[R27].arch_info)->enable = true;
1190 }
1191
1192 if (misc_config->no_dx_register == false) {
1193 ((struct nds32_reg *)reg_cache->reg_list[D0LO].arch_info)->enable = true;
1194 ((struct nds32_reg *)reg_cache->reg_list[D0HI].arch_info)->enable = true;
1195 ((struct nds32_reg *)reg_cache->reg_list[D1LO].arch_info)->enable = true;
1196 ((struct nds32_reg *)reg_cache->reg_list[D1HI].arch_info)->enable = true;
1197 }
1198
1199 if (misc_config->ex9)
1200 ((struct nds32_reg *)reg_cache->reg_list[ITB].arch_info)->enable = true;
1201
1202 if (no_cr5 == false)
1203 ((struct nds32_reg *)reg_cache->reg_list[CR5].arch_info)->enable = true;
1204
1205 if (cpu_version->cop_fpu_extension) {
1206 ((struct nds32_reg *)reg_cache->reg_list[CR6].arch_info)->enable = true;
1207 ((struct nds32_reg *)reg_cache->reg_list[FPCSR].arch_info)->enable = true;
1208 ((struct nds32_reg *)reg_cache->reg_list[FPCFG].arch_info)->enable = true;
1209 }
1210
1211 if (mmu_config->memory_protection == 1) {
1212 /* Secure MPU has no IPC, IPSW, P_ITYPE */
1213 ((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = false;
1214 ((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = false;
1215 }
1216
1217 if (nds32->privilege_level != 0)
1218 ((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = false;
1219
1220 if (misc_config->mcu == true)
1221 ((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = false;
1222
1223 if (misc_config->interruption_level == false) {
1224 ((struct nds32_reg *)reg_cache->reg_list[IR2].arch_info)->enable = true;
1225 ((struct nds32_reg *)reg_cache->reg_list[IR5].arch_info)->enable = true;
1226 ((struct nds32_reg *)reg_cache->reg_list[IR10].arch_info)->enable = true;
1227 ((struct nds32_reg *)reg_cache->reg_list[IR12].arch_info)->enable = true;
1228 ((struct nds32_reg *)reg_cache->reg_list[IR13].arch_info)->enable = true;
1229
1230 /* Secure MPU has no IPC, IPSW, P_ITYPE */
1231 if (mmu_config->memory_protection != 1)
1232 ((struct nds32_reg *)reg_cache->reg_list[IR7].arch_info)->enable = true;
1233 }
1234
1235 if ((cpu_version->cpu_id_family == 0x9) ||
1236 (cpu_version->cpu_id_family == 0xA) ||
1237 (cpu_version->cpu_id_family == 0xC) ||
1238 (cpu_version->cpu_id_family == 0xD))
1239 ((struct nds32_reg *)reg_cache->reg_list[IR8].arch_info)->enable = true;
1240
1241 if (misc_config->shadow == 1) {
1242 ((struct nds32_reg *)reg_cache->reg_list[IR16].arch_info)->enable = true;
1243 ((struct nds32_reg *)reg_cache->reg_list[IR17].arch_info)->enable = true;
1244 }
1245
1246 if (misc_config->ifc)
1247 ((struct nds32_reg *)reg_cache->reg_list[IFC_LP].arch_info)->enable = true;
1248
1249 if (nds32->privilege_level != 0)
1250 ((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = false;
1251
1252 if (mmu_config->memory_protection == 1) {
1253 if (mmu_config->memory_protection_version == 24)
1254 ((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;
1255
1256 if (nds32->privilege_level == 0) {
1257 if ((mmu_config->memory_protection_version == 16) ||
1258 (mmu_config->memory_protection_version == 24)) {
1259 ((struct nds32_reg *)reg_cache->reg_list[MR11].arch_info)->enable = true;
1260 ((struct nds32_reg *)reg_cache->reg_list[SECUR0].arch_info)->enable = true;
1261 ((struct nds32_reg *)reg_cache->reg_list[IR20].arch_info)->enable = true;
1262 ((struct nds32_reg *)reg_cache->reg_list[IR22].arch_info)->enable = true;
1263 ((struct nds32_reg *)reg_cache->reg_list[IR24].arch_info)->enable = true;
1264 ((struct nds32_reg *)reg_cache->reg_list[IR30].arch_info)->enable = true;
1265
1266 if (misc_config->shadow == 1) {
1267 ((struct nds32_reg *)reg_cache->reg_list[IR21].arch_info)->enable = true;
1268 ((struct nds32_reg *)reg_cache->reg_list[IR23].arch_info)->enable = true;
1269 ((struct nds32_reg *)reg_cache->reg_list[IR25].arch_info)->enable = true;
1270 }
1271 }
1272 }
1273 } else if (mmu_config->memory_protection == 2) {
1274 ((struct nds32_reg *)reg_cache->reg_list[MR1].arch_info)->enable = true;
1275 ((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;
1276
1277 if ((cpu_version->cpu_id_family != 0xA) && (cpu_version->cpu_id_family != 0xC) &&
1278 (cpu_version->cpu_id_family != 0xD))
1279 ((struct nds32_reg *)reg_cache->reg_list[MR5].arch_info)->enable = true;
1280 }
1281
1282 if (mmu_config->memory_protection > 0) {
1283 ((struct nds32_reg *)reg_cache->reg_list[MR2].arch_info)->enable = true;
1284 ((struct nds32_reg *)reg_cache->reg_list[MR3].arch_info)->enable = true;
1285 }
1286
1287 if (memory_config->ilm_base != 0)
1288 if (nds32->privilege_level == 0)
1289 ((struct nds32_reg *)reg_cache->reg_list[MR6].arch_info)->enable = true;
1290
1291 if (memory_config->dlm_base != 0)
1292 if (nds32->privilege_level == 0)
1293 ((struct nds32_reg *)reg_cache->reg_list[MR7].arch_info)->enable = true;
1294
1295 if ((memory_config->icache.line_size != 0) && (memory_config->dcache.line_size != 0))
1296 ((struct nds32_reg *)reg_cache->reg_list[MR8].arch_info)->enable = true;
1297
1298 if (misc_config->high_speed_memory_port)
1299 ((struct nds32_reg *)reg_cache->reg_list[MR9].arch_info)->enable = true;
1300
1301 if (mr10_exist)
1302 ((struct nds32_reg *)reg_cache->reg_list[MR10].arch_info)->enable = true;
1303
1304 if (misc_config->edm) {
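/* each hardware breakpoint provides five EDM debug registers
 * (address, mask, value, context ID and control), so enable
 * $dr0 .. $dr(5 * breakpoint_num - 1) */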
1305 int dr_reg_n = nds32->edm.breakpoint_num * 5;
1306
1307 for (int i = 0 ; i < dr_reg_n ; i++)
1308 ((struct nds32_reg *)reg_cache->reg_list[DR0 + i].arch_info)->enable = true;
1309
1310 ((struct nds32_reg *)reg_cache->reg_list[DR41].arch_info)->enable = true;
1311 ((struct nds32_reg *)reg_cache->reg_list[DR43].arch_info)->enable = true;
1312 ((struct nds32_reg *)reg_cache->reg_list[DR44].arch_info)->enable = true;
1313 ((struct nds32_reg *)reg_cache->reg_list[DR45].arch_info)->enable = true;
1314 }
1315
1316 if (misc_config->debug_tracer) {
1317 ((struct nds32_reg *)reg_cache->reg_list[DR46].arch_info)->enable = true;
1318 ((struct nds32_reg *)reg_cache->reg_list[DR47].arch_info)->enable = true;
1319 }
1320
1321 if (misc_config->performance_monitor) {
1322 ((struct nds32_reg *)reg_cache->reg_list[PFR0].arch_info)->enable = true;
1323 ((struct nds32_reg *)reg_cache->reg_list[PFR1].arch_info)->enable = true;
1324 ((struct nds32_reg *)reg_cache->reg_list[PFR2].arch_info)->enable = true;
1325 ((struct nds32_reg *)reg_cache->reg_list[PFR3].arch_info)->enable = true;
1326 }
1327
1328 if (misc_config->local_memory_dma) {
1329 ((struct nds32_reg *)reg_cache->reg_list[DMAR0].arch_info)->enable = true;
1330 ((struct nds32_reg *)reg_cache->reg_list[DMAR1].arch_info)->enable = true;
1331 ((struct nds32_reg *)reg_cache->reg_list[DMAR2].arch_info)->enable = true;
1332 ((struct nds32_reg *)reg_cache->reg_list[DMAR3].arch_info)->enable = true;
1333 ((struct nds32_reg *)reg_cache->reg_list[DMAR4].arch_info)->enable = true;
1334 ((struct nds32_reg *)reg_cache->reg_list[DMAR5].arch_info)->enable = true;
1335 ((struct nds32_reg *)reg_cache->reg_list[DMAR6].arch_info)->enable = true;
1336 ((struct nds32_reg *)reg_cache->reg_list[DMAR7].arch_info)->enable = true;
1337 ((struct nds32_reg *)reg_cache->reg_list[DMAR8].arch_info)->enable = true;
1338 ((struct nds32_reg *)reg_cache->reg_list[DMAR9].arch_info)->enable = true;
1339 ((struct nds32_reg *)reg_cache->reg_list[DMAR10].arch_info)->enable = true;
1340 }
1341
1342 if ((misc_config->local_memory_dma || misc_config->performance_monitor) &&
1343 (no_racr0 == false))
1344 ((struct nds32_reg *)reg_cache->reg_list[RACR].arch_info)->enable = true;
1345
1346 if (cpu_version->cop_fpu_extension || (misc_config->audio_isa != 0))
1347 ((struct nds32_reg *)reg_cache->reg_list[FUCPR].arch_info)->enable = true;
1348
1349 if (misc_config->audio_isa != 0) {
1350 if (misc_config->audio_isa > 1) {
1351 ((struct nds32_reg *)reg_cache->reg_list[D0L24].arch_info)->enable = true;
1352 ((struct nds32_reg *)reg_cache->reg_list[D1L24].arch_info)->enable = true;
1353 }
1354
1355 ((struct nds32_reg *)reg_cache->reg_list[I0].arch_info)->enable = true;
1356 ((struct nds32_reg *)reg_cache->reg_list[I1].arch_info)->enable = true;
1357 ((struct nds32_reg *)reg_cache->reg_list[I2].arch_info)->enable = true;
1358 ((struct nds32_reg *)reg_cache->reg_list[I3].arch_info)->enable = true;
1359 ((struct nds32_reg *)reg_cache->reg_list[I4].arch_info)->enable = true;
1360 ((struct nds32_reg *)reg_cache->reg_list[I5].arch_info)->enable = true;
1361 ((struct nds32_reg *)reg_cache->reg_list[I6].arch_info)->enable = true;
1362 ((struct nds32_reg *)reg_cache->reg_list[I7].arch_info)->enable = true;
1363 ((struct nds32_reg *)reg_cache->reg_list[M1].arch_info)->enable = true;
1364 ((struct nds32_reg *)reg_cache->reg_list[M2].arch_info)->enable = true;
1365 ((struct nds32_reg *)reg_cache->reg_list[M3].arch_info)->enable = true;
1366 ((struct nds32_reg *)reg_cache->reg_list[M5].arch_info)->enable = true;
1367 ((struct nds32_reg *)reg_cache->reg_list[M6].arch_info)->enable = true;
1368 ((struct nds32_reg *)reg_cache->reg_list[M7].arch_info)->enable = true;
1369 ((struct nds32_reg *)reg_cache->reg_list[MOD].arch_info)->enable = true;
1370 ((struct nds32_reg *)reg_cache->reg_list[LBE].arch_info)->enable = true;
1371 ((struct nds32_reg *)reg_cache->reg_list[LE].arch_info)->enable = true;
1372 ((struct nds32_reg *)reg_cache->reg_list[LC].arch_info)->enable = true;
1373 ((struct nds32_reg *)reg_cache->reg_list[ADM_VBASE].arch_info)->enable = true;
1374 ((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL0].arch_info)->enable = true;
1375 ((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL1].arch_info)->enable = true;
1376
1377 uint32_t value_mod;
1378 uint32_t fucpr_backup;
1379 /* temporarily enable the audio extension to read its configuration from $mod */
1380 nds32_get_mapped_reg(nds32, FUCPR, &fucpr_backup);
1381 if ((fucpr_backup & 0x80000000) == 0)
1382 nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup | 0x80000000);
1383 nds32_get_mapped_reg(nds32, MOD, &value_mod);
1384 /* restore the original fucpr value */
1385 if ((fucpr_backup & 0x80000000) == 0)
1386 nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup);
1387
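/* $mod bit 6 indicates that the $cb_ctl and $cbb0-$cbb3/$cbe0-$cbe3
 * (circular buffer) registers are implemented */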
1388 if ((value_mod >> 6) & 0x1) {
1389 ((struct nds32_reg *)reg_cache->reg_list[CB_CTL].arch_info)->enable = true;
1390 ((struct nds32_reg *)reg_cache->reg_list[CBB0].arch_info)->enable = true;
1391 ((struct nds32_reg *)reg_cache->reg_list[CBB1].arch_info)->enable = true;
1392 ((struct nds32_reg *)reg_cache->reg_list[CBB2].arch_info)->enable = true;
1393 ((struct nds32_reg *)reg_cache->reg_list[CBB3].arch_info)->enable = true;
1394 ((struct nds32_reg *)reg_cache->reg_list[CBE0].arch_info)->enable = true;
1395 ((struct nds32_reg *)reg_cache->reg_list[CBE1].arch_info)->enable = true;
1396 ((struct nds32_reg *)reg_cache->reg_list[CBE2].arch_info)->enable = true;
1397 ((struct nds32_reg *)reg_cache->reg_list[CBE3].arch_info)->enable = true;
1398 }
1399 }
1400
1401 if ((cpu_version->cpu_id_family == 0x9) ||
1402 (cpu_version->cpu_id_family == 0xA) ||
1403 (cpu_version->cpu_id_family == 0xC)) {
1404
1405 ((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = true;
1406 ((struct nds32_reg *)reg_cache->reg_list[IDR1].arch_info)->enable = true;
1407
1408 if ((cpu_version->cpu_id_family == 0xC) && (cpu_version->revision == 0x0C))
1409 ((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = false;
1410 }
1411
1412 uint32_t ir3_value;
1413 uint32_t ivb_prog_pri_lvl;
1414 uint32_t ivb_ivic_ver;
1415
1416 nds32_get_mapped_reg(nds32, IR3, &ir3_value);
1417 ivb_prog_pri_lvl = ir3_value & 0x1;
1418 ivb_ivic_ver = (ir3_value >> 11) & 0x3;
1419
1420 if ((ivb_prog_pri_lvl == 1) || (ivb_ivic_ver >= 1)) {
1421 ((struct nds32_reg *)reg_cache->reg_list[IR18].arch_info)->enable = true;
1422 ((struct nds32_reg *)reg_cache->reg_list[IR19].arch_info)->enable = true;
1423 }
1424
1425 if (ivb_ivic_ver >= 1) {
1426 ((struct nds32_reg *)reg_cache->reg_list[IR26].arch_info)->enable = true;
1427 ((struct nds32_reg *)reg_cache->reg_list[IR27].arch_info)->enable = true;
1428 ((struct nds32_reg *)reg_cache->reg_list[IR28].arch_info)->enable = true;
1429 ((struct nds32_reg *)reg_cache->reg_list[IR29].arch_info)->enable = true;
1430 }
1431
1432 return ERROR_OK;
1433 }
1434
1435 int nds32_init_register_table(struct nds32 *nds32)
1436 {
1437 nds32_init_must_have_registers(nds32);
1438
1439 return ERROR_OK;
1440 }
1441
1442 int nds32_add_software_breakpoint(struct target *target,
1443 struct breakpoint *breakpoint)
1444 {
1445 uint32_t data;
1446 uint32_t check_data;
1447 uint32_t break_insn;
1448
1449 /* read the instruction to determine the breakpoint size */
1450 target->type->read_buffer(target, breakpoint->address, 4, (uint8_t *)&data);
1451
1452 /* back up the original instruction;
1453  * the instruction stream is big-endian */
1454 if (*(char *)&data & 0x80) { /* 16-bit instruction */
1455 breakpoint->length = 2;
1456 break_insn = NDS32_BREAK_16;
1457 } else { /* 32-bit instruction */
1458 breakpoint->length = 4;
1459 break_insn = NDS32_BREAK_32;
1460 }
1461
1462 free(breakpoint->orig_instr);
1463
1464 breakpoint->orig_instr = malloc(breakpoint->length);
1465 memcpy(breakpoint->orig_instr, &data, breakpoint->length);
1466
1467 /* self-modifying code: patch the break instruction into memory */
1468 target->type->write_buffer(target, breakpoint->address, breakpoint->length, (const uint8_t *)&break_insn);
1469 /* write_back & invalidate dcache & invalidate icache */
1470 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1471
1472 /* read back to check */
1473 target->type->read_buffer(target, breakpoint->address, breakpoint->length, (uint8_t *)&check_data);
1474 if (memcmp(&check_data, &break_insn, breakpoint->length) == 0)
1475 return ERROR_OK;
1476
1477 return ERROR_FAIL;
1478 }
1479
1480 int nds32_remove_software_breakpoint(struct target *target,
1481 struct breakpoint *breakpoint)
1482 {
1483 uint32_t check_data;
1484 uint32_t break_insn;
1485
1486 if (breakpoint->length == 2)
1487 break_insn = NDS32_BREAK_16;
1488 else if (breakpoint->length == 4)
1489 break_insn = NDS32_BREAK_32;
1490 else
1491 return ERROR_FAIL;
1492
1493 target->type->read_buffer(target, breakpoint->address, breakpoint->length,
1494 (uint8_t *)&check_data);
1495
1496 /* the break instruction has been modified; refuse to remove the breakpoint */
1497 if (memcmp(&check_data, &break_insn, breakpoint->length) != 0)
1498 return ERROR_FAIL;
1499
1500 /* self-modifying code: restore the original instruction */
1501 target->type->write_buffer(target, breakpoint->address, breakpoint->length,
1502 breakpoint->orig_instr);
1503
1504 /* write_back & invalidate dcache & invalidate icache */
1505 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1506
1507 return ERROR_OK;
1508 }
1509
1510 /**
1511  * Restore the processor context on an Andes target. The register cache is
1512  * scanned for registers that are dirty on the host side but hold a valid
1513  * new value. Each such register is written back to the processor through
1514  * the AICE interface and then marked clean again. A dirty register whose
1515  * cached value is not valid is simply skipped, since there is nothing
1516  * meaningful to write back.
1517 *
1518 * @param target Pointer to the Andes target to have its context restored
1519 * @return Error status if the target is not halted.
1520 */
1521 int nds32_restore_context(struct target *target)
1522 {
1523 struct nds32 *nds32 = target_to_nds32(target);
1524 struct aice_port_s *aice = target_to_aice(target);
1525 struct reg_cache *reg_cache = nds32->core_cache;
1526 struct reg *reg;
1527 struct nds32_reg *reg_arch_info;
1528 unsigned int i;
1529
1530 LOG_DEBUG("-");
1531
1532 if (target->state != TARGET_HALTED) {
1533 LOG_WARNING("target not halted");
1534 return ERROR_TARGET_NOT_HALTED;
1535 }
1536
1537 /* check if there are dirty registers */
1538 for (i = 0; i < reg_cache->num_regs; i++) {
1539 reg = &(reg_cache->reg_list[i]);
1540 if (reg->dirty == true) {
1541 if (reg->valid == true) {
1542
1543 LOG_DEBUG("examining dirty reg: %s", reg->name);
1544 LOG_DEBUG("writing register %d with value 0x%8.8" PRIx32,
1545 i, buf_get_u32(reg->value, 0, 32));
1546
1547 reg_arch_info = reg->arch_info;
1548 if (reg_arch_info->num >= FD0 && reg_arch_info->num <= FD31) {
1549 uint64_t val = buf_get_u64(reg_arch_info->value, 0, 64);
1550 aice_write_reg_64(aice, reg_arch_info->num, val);
1551 } else {
1552 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
1553 aice_write_register(aice, reg_arch_info->num, val);
1554 }
1555
1556 reg->valid = true;
1557 reg->dirty = false;
1558 }
1559 }
1560 }
1561
1562 return ERROR_OK;
1563 }
1564
1565 int nds32_edm_config(struct nds32 *nds32)
1566 {
1567 struct target *target = nds32->target;
1568 struct aice_port_s *aice = target_to_aice(target);
1569 uint32_t edm_cfg;
1570 uint32_t edm_ctl;
1571
1572 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CFG, &edm_cfg);
1573
1574 nds32->edm.version = (edm_cfg >> 16) & 0xFFFF;
1575 LOG_INFO("EDM version 0x%04x", nds32->edm.version);
1576
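/* EDM_CFG[2:0] encodes the number of hardware breakpoint register sets minus one */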
1577 nds32->edm.breakpoint_num = (edm_cfg & 0x7) + 1;
1578
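/* EDM versions 0x60 and later, or versions with bit 12 set, support the
 * ACC_CTL access control used by nds32_select_memory_mode() */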
1579 if ((nds32->edm.version & 0x1000) || (nds32->edm.version >= 0x60))
1580 nds32->edm.access_control = true;
1581 else
1582 nds32->edm.access_control = false;
1583
1584 if ((edm_cfg >> 4) & 0x1)
1585 nds32->edm.direct_access_local_memory = true;
1586 else
1587 nds32->edm.direct_access_local_memory = false;
1588
1589 if (nds32->edm.version <= 0x20)
1590 nds32->edm.direct_access_local_memory = false;
1591
1592 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &edm_ctl);
1593 if (edm_ctl & (0x1 << 29))
1594 nds32->edm.support_max_stop = true;
1595 else
1596 nds32->edm.support_max_stop = false;
1597
1598 /* set passcode for secure MCU */
1599 nds32_login(nds32);
1600
1601 return ERROR_OK;
1602 }
1603
1604 int nds32_config(struct nds32 *nds32)
1605 {
1606 nds32_init_config(nds32);
1607
1608 /* init optional system registers according to config registers */
1609 nds32_init_option_registers(nds32);
1610
1611 /* get max interrupt level */
1612 if (nds32->misc_config.interruption_level)
1613 nds32->max_interrupt_level = 2;
1614 else
1615 nds32->max_interrupt_level = 3;
1616
1617 /* get ILM/DLM size from MR6/MR7 */
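/* Bits [4:1] of MR6 and MR7 hold an encoded size index that
 * nds32_lm_size_table maps to the ILM/DLM size in bytes. */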
1618 uint32_t value_mr6, value_mr7;
1619 uint32_t size_index;
1620 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
1621 size_index = (value_mr6 >> 1) & 0xF;
1622 nds32->memory.ilm_size = nds32_lm_size_table[size_index];
1623
1624 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
1625 size_index = (value_mr7 >> 1) & 0xF;
1626 nds32->memory.dlm_size = nds32_lm_size_table[size_index];
1627
1628 return ERROR_OK;
1629 }
1630
1631 int nds32_init_arch_info(struct target *target, struct nds32 *nds32)
1632 {
1633 target->arch_info = nds32;
1634 nds32->target = target;
1635
1636 nds32->common_magic = NDS32_COMMON_MAGIC;
1637 nds32->init_arch_info_after_halted = false;
1638 nds32->auto_convert_hw_bp = true;
1639 nds32->global_stop = false;
1640 nds32->soft_reset_halt = false;
1641 nds32->edm_passcode = NULL;
1642 nds32->privilege_level = 0;
1643 nds32->boot_time = 1500;
1644 nds32->reset_halt_as_examine = false;
1645 nds32->keep_target_edm_ctl = false;
1646 nds32->word_access_mem = false;
1647 nds32->virtual_hosting = true;
1648 nds32->hit_syscall = false;
1649 nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;
1650 nds32->virtual_hosting_errno = 0;
1651 nds32->virtual_hosting_ctrl_c = false;
1652 nds32->attached = false;
1653
1654 nds32->syscall_break.asid = 0;
1655 nds32->syscall_break.length = 4;
1656 nds32->syscall_break.set = 0;
1657 nds32->syscall_break.orig_instr = NULL;
1658 nds32->syscall_break.next = NULL;
1659 nds32->syscall_break.unique_id = 0x515CAll + target->target_number;
1660 nds32->syscall_break.linked_brp = 0;
1661
1662 nds32_reg_init();
1663
1664 if (nds32_reg_cache_init(target, nds32) == ERROR_FAIL)
1665 return ERROR_FAIL;
1666
1667 if (nds32_init_register_table(nds32) != ERROR_OK)
1668 return ERROR_FAIL;
1669
1670 return ERROR_OK;
1671 }
1672
1673 int nds32_virtual_to_physical(struct target *target, target_addr_t address, target_addr_t *physical)
1674 {
1675 struct nds32 *nds32 = target_to_nds32(target);
1676
1677 if (nds32->memory.address_translation == false) {
1678 *physical = address;
1679 return ERROR_OK;
1680 }
1681
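/* With address translation enabled, try a TLB probe first and fall back to
 * walking the page table. */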
1682 if (nds32_probe_tlb(nds32, address, physical) == ERROR_OK)
1683 return ERROR_OK;
1684
1685 if (nds32_walk_page_table(nds32, address, physical) == ERROR_OK)
1686 return ERROR_OK;
1687
1688 return ERROR_FAIL;
1689 }
1690
1691 int nds32_cache_sync(struct target *target, target_addr_t address, uint32_t length)
1692 {
1693 struct aice_port_s *aice = target_to_aice(target);
1694 struct nds32 *nds32 = target_to_nds32(target);
1695 struct nds32_cache *dcache = &(nds32->memory.dcache);
1696 struct nds32_cache *icache = &(nds32->memory.icache);
1697 uint32_t dcache_line_size = nds32_line_size_table[dcache->line_size];
1698 uint32_t icache_line_size = nds32_line_size_table[icache->line_size];
1699 uint32_t cur_address;
1700 int result;
1701 uint32_t start_line, end_line;
1702 uint32_t cur_line;
1703
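/* The line_size fields are encoded indexes into nds32_line_size_table; a
 * non-zero index i corresponds to a line size of (1 << (i + 2)) bytes,
 * e.g. i = 3 means 32-byte lines, so shifting an address right by (i + 2)
 * yields its cache-line number. */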
1704 if ((dcache->line_size != 0) && (dcache->enable == true)) {
1705 /* address / dcache_line_size */
1706 start_line = address >> (dcache->line_size + 2);
1707 /* (address + length - 1) / dcache_line_size */
1708 end_line = (address + length - 1) >> (dcache->line_size + 2);
1709
1710 for (cur_address = address, cur_line = start_line;
1711 cur_line <= end_line;
1712 cur_address += dcache_line_size, cur_line++) {
1713 /* D$ write back */
1714 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_WB, cur_address);
1715 if (result != ERROR_OK)
1716 return result;
1717
1718 /* D$ invalidate */
1719 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_INVAL, cur_address);
1720 if (result != ERROR_OK)
1721 return result;
1722 }
1723 }
1724
1725 if ((icache->line_size != 0) && (icache->enable == true)) {
1726 /* address / icache_line_size */
1727 start_line = address >> (icache->line_size + 2);
1728 /* (address + length - 1) / icache_line_size */
1729 end_line = (address + length - 1) >> (icache->line_size + 2);
1730
1731 for (cur_address = address, cur_line = start_line;
1732 cur_line <= end_line;
1733 cur_address += icache_line_size, cur_line++) {
1734 /* Because PSW.IT is turned off under a debug exception, the address MUST
1735 * be a physical address. L1I_VA_INVALIDATE uses PSW.IT to decide whether
1736 * to translate the address or not. */
1737 target_addr_t physical_addr;
1738 if (target->type->virt2phys(target, cur_address, &physical_addr) == ERROR_FAIL)
1739 return ERROR_FAIL;
1740
1741 /* I$ invalidate */
1742 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1I_VA_INVAL, physical_addr);
1743 if (result != ERROR_OK)
1744 return result;
1745 }
1746 }
1747
1748 return ERROR_OK;
1749 }
1750
1751 uint32_t nds32_nextpc(struct nds32 *nds32, int current, uint32_t address)
1752 {
1753 if (!current)
1754 nds32_set_mapped_reg(nds32, PC, address);
1755 else
1756 nds32_get_mapped_reg(nds32, PC, &address);
1757
1758 return address;
1759 }
1760
1761 int nds32_step(struct target *target, int current,
1762 target_addr_t address, int handle_breakpoints)
1763 {
1764 LOG_DEBUG("target->state: %s",
1765 target_state_name(target));
1766
1767 if (target->state != TARGET_HALTED) {
1768 LOG_WARNING("target was not halted");
1769 return ERROR_TARGET_NOT_HALTED;
1770 }
1771
1772 struct nds32 *nds32 = target_to_nds32(target);
1773
1774 address = nds32_nextpc(nds32, current, address);
1775
1776 LOG_DEBUG("STEP PC %08" TARGET_PRIxADDR "%s", address, !current ? "!" : "");
1777
1778 /** set DSSIM */
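/* DSSIM lives in IR14 bit 31; it is programmed here from the step_isr_enable
 * setting and, when that setting is on, cleared again after the step
 * completes. */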
1779 uint32_t ir14_value;
1780 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1781 if (nds32->step_isr_enable)
1782 ir14_value |= (0x1 << 31);
1783 else
1784 ir14_value &= ~(0x1 << 31);
1785 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1786
1787 /* check hit_syscall before leave_debug_state() because
1788 * leave_debug_state() may clear hit_syscall flag */
1789 bool no_step = false;
1790 if (nds32->hit_syscall)
1791 /* step after hit_syscall should be ignored because
1792 * leave_debug_state will step implicitly to skip the
1793 * syscall */
1794 no_step = true;
1795
1796 /********* TODO: maybe create another function to handle this part */
1797 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
1798 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
1799
1800 if (no_step == false) {
1801 struct aice_port_s *aice = target_to_aice(target);
1802 if (aice_step(aice) != ERROR_OK)
1803 return ERROR_FAIL;
1804 }
1805
1806 /* save state */
1807 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
1808 /********* TODO: maybe create another function to handle this part */
1809
1810 /* restore DSSIM */
1811 if (nds32->step_isr_enable) {
1812 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1813 ir14_value &= ~(0x1 << 31);
1814 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1815 }
1816
1817 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
1818
1819 return ERROR_OK;
1820 }
1821
1822 static int nds32_step_without_watchpoint(struct nds32 *nds32)
1823 {
1824 struct target *target = nds32->target;
1825
1826 if (target->state != TARGET_HALTED) {
1827 LOG_WARNING("target was not halted");
1828 return ERROR_TARGET_NOT_HALTED;
1829 }
1830
1831 /** set DSSIM */
1832 uint32_t ir14_value;
1833 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1834 if (nds32->step_isr_enable)
1835 ir14_value |= (0x1 << 31);
1836 else
1837 ir14_value &= ~(0x1 << 31);
1838 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1839
1840 /********* TODO: maybe create another function to handle this part */
1841 CHECK_RETVAL(nds32->leave_debug_state(nds32, false));
1842
1843 struct aice_port_s *aice = target_to_aice(target);
1844
1845 if (aice_step(aice) != ERROR_OK)
1846 return ERROR_FAIL;
1847
1848 /* save state */
1849 CHECK_RETVAL(nds32->enter_debug_state(nds32, false));
1850 /********* TODO: maybe create another function to handle this part */
1851
1852 /* restore DSSIM */
1853 if (nds32->step_isr_enable) {
1854 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1855 ir14_value &= ~(0x1 << 31);
1856 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1857 }
1858
1859 return ERROR_OK;
1860 }
1861
1862 int nds32_target_state(struct nds32 *nds32, enum target_state *state)
1863 {
1864 struct aice_port_s *aice = target_to_aice(nds32->target);
1865 enum aice_target_state_s nds32_state;
1866
1867 if (aice_state(aice, &nds32_state) != ERROR_OK)
1868 return ERROR_FAIL;
1869
1870 switch (nds32_state) {
1871 case AICE_DISCONNECT:
1872 LOG_INFO("USB is disconnected");
1873 return ERROR_FAIL;
1874 case AICE_TARGET_DETACH:
1875 LOG_INFO("Target is disconnected");
1876 return ERROR_FAIL;
1877 case AICE_TARGET_UNKNOWN:
1878 *state = TARGET_UNKNOWN;
1879 break;
1880 case AICE_TARGET_RUNNING:
1881 *state = TARGET_RUNNING;
1882 break;
1883 case AICE_TARGET_HALTED:
1884 *state = TARGET_HALTED;
1885 break;
1886 case AICE_TARGET_RESET:
1887 *state = TARGET_RESET;
1888 break;
1889 case AICE_TARGET_DEBUG_RUNNING:
1890 *state = TARGET_DEBUG_RUNNING;
1891 break;
1892 default:
1893 return ERROR_FAIL;
1894 }
1895
1896 return ERROR_OK;
1897 }
1898
1899 int nds32_examine_debug_reason(struct nds32 *nds32)
1900 {
1901 uint32_t reason;
1902 struct target *target = nds32->target;
1903
1904 if (nds32->hit_syscall == true) {
1905 LOG_DEBUG("Hit syscall breakpoint");
1906 target->debug_reason = DBG_REASON_BREAKPOINT;
1907 return ERROR_OK;
1908 }
1909
1910 nds32->get_debug_reason(nds32, &reason);
1911
1912 LOG_DEBUG("nds32 examines debug reason: %s", nds32_debug_type_name[reason]);
1913
1914 /* Examine debug reason */
1915 switch (reason) {
1916 case NDS32_DEBUG_BREAK:
1917 case NDS32_DEBUG_BREAK_16:
1918 case NDS32_DEBUG_INST_BREAK:
1919 {
1920 uint32_t value_pc;
1921 uint32_t opcode;
1922 struct nds32_instruction instruction;
1923
1924 nds32_get_mapped_reg(nds32, PC, &value_pc);
1925
1926 if (nds32_read_opcode(nds32, value_pc, &opcode) != ERROR_OK)
1927 return ERROR_FAIL;
1928 if (nds32_evaluate_opcode(nds32, opcode, value_pc, &instruction) != ERROR_OK)
1929 return ERROR_FAIL;
1930
1931 /* hit 'break 0x7FFF' */
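/* A 'break' whose immediate is 0x7FFF (opc_6 0x32, sub-opcode 0xA) is treated
 * as the program-exit marker, so it is reported as DBG_REASON_EXIT rather
 * than as an ordinary breakpoint. */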
1932 if ((instruction.info.opc_6 == 0x32) &&
1933 (instruction.info.sub_opc == 0xA) &&
1934 (instruction.info.imm == 0x7FFF)) {
1935 target->debug_reason = DBG_REASON_EXIT;
1936 } else
1937 target->debug_reason = DBG_REASON_BREAKPOINT;
1938 }
1939 break;
1940 case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_PRECISE:
1941 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_PRECISE:
1942 case NDS32_DEBUG_LOAD_STORE_GLOBAL_STOP: /* GLOBAL_STOP is precise exception */
1943 {
1944 int result;
1945
1946 result = nds32->get_watched_address(nds32,
1947 &(nds32->watched_address), reason);
1948 /* do a single step (without watchpoints) to skip the "watched" instruction */
1949 nds32_step_without_watchpoint(nds32);
1950
1951 /* the watched address was saved before the single step; fail now if that lookup did not succeed */
1952 if (result != ERROR_OK)
1953 return ERROR_FAIL;
1954
1955 target->debug_reason = DBG_REASON_WATCHPOINT;
1956 }
1957 break;
1958 case NDS32_DEBUG_DEBUG_INTERRUPT:
1959 target->debug_reason = DBG_REASON_DBGRQ;
1960 break;
1961 case NDS32_DEBUG_HARDWARE_SINGLE_STEP:
1962 target->debug_reason = DBG_REASON_SINGLESTEP;
1963 break;
1964 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_IMPRECISE:
1965 case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_NEXT_PRECISE:
1966 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_NEXT_PRECISE:
1967 if (nds32->get_watched_address(nds32, &(nds32->watched_address), reason) != ERROR_OK)
1968 return ERROR_FAIL;
1969
1970 target->debug_reason = DBG_REASON_WATCHPOINT;
1971 break;
1972 default:
1973 target->debug_reason = DBG_REASON_UNDEFINED;
1974 break;
1975 }
1976
1977 return ERROR_OK;
1978 }
1979
1980 int nds32_login(struct nds32 *nds32)
1981 {
1982 struct target *target = nds32->target;
1983 struct aice_port_s *aice = target_to_aice(target);
1984 uint32_t passcode_length;
1985 char command_sequence[129];
1986 char command_str[33];
1987 char code_str[9];
1988 uint32_t copy_length;
1989 uint32_t code;
1990 uint32_t i;
1991
1992 LOG_DEBUG("nds32_login");
1993
1994 if (nds32->edm_passcode) {
1995 /* convert EDM passcode to command sequences */
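/* The passcode string is consumed eight hex digits at a time; each chunk is
 * parsed as a 32-bit value and appended as a "write_misc gen_port0" command.
 * For example, a passcode of "1234567890AB" would expand to
 * "write_misc gen_port0 0x12345678;write_misc gen_port0 0x90ab;". */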
1996 passcode_length = strlen(nds32->edm_passcode);
1997 command_sequence[0] = '\0';
1998 for (i = 0; i < passcode_length; i += 8) {
1999 if (passcode_length - i < 8)
2000 copy_length = passcode_length - i;
2001 else
2002 copy_length = 8;
2003
2004 strncpy(code_str, nds32->edm_passcode + i, copy_length);
2005 code_str[copy_length] = '\0';
2006 code = strtoul(code_str, NULL, 16);
2007
2008 sprintf(command_str, "write_misc gen_port0 0x%" PRIx32 ";", code);
2009 strcat(command_sequence, command_str);
2010 }
2011
2012 if (aice_program_edm(aice, command_sequence) != ERROR_OK)
2013 return ERROR_FAIL;
2014
2015 /* get current privilege level */
2016 uint32_t value_edmsw;
2017 aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &value_edmsw);
2018 nds32->privilege_level = (value_edmsw >> 16) & 0x3;
2019 LOG_INFO("Current privilege level: %d", nds32->privilege_level);
2020 }
2021
2022 if (nds32_edm_ops_num > 0) {
2023 const char *reg_name;
2024 for (i = 0 ; i < nds32_edm_ops_num ; i++) {
2025 code = nds32_edm_ops[i].value;
2026 if (nds32_edm_ops[i].reg_no == 6)
2027 reg_name = "gen_port0";
2028 else if (nds32_edm_ops[i].reg_no == 7)
2029 reg_name = "gen_port1";
2030 else
2031 return ERROR_FAIL;
2032
2033 sprintf(command_str, "write_misc %s 0x%" PRIx32 ";", reg_name, code);
2034 if (aice_program_edm(aice, command_str) != ERROR_OK)
2035 return ERROR_FAIL;
2036 }
2037 }
2038
2039 return ERROR_OK;
2040 }
2041
2042 int nds32_halt(struct target *target)
2043 {
2044 struct nds32 *nds32 = target_to_nds32(target);
2045 struct aice_port_s *aice = target_to_aice(target);
2046 enum target_state state;
2047
2048 LOG_DEBUG("target->state: %s",
2049 target_state_name(target));
2050
2051 if (target->state == TARGET_HALTED) {
2052 LOG_DEBUG("target was already halted");
2053 return ERROR_OK;
2054 }
2055
2056 if (nds32_target_state(nds32, &state) != ERROR_OK)
2057 return ERROR_FAIL;
2058
2059 if (state != TARGET_HALTED)
2060 /* TODO: if state == TARGET_HALTED, check whether ETYPE is DBGI */
2061 if (aice_halt(aice) != ERROR_OK)
2062 return ERROR_FAIL;
2063
2064 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
2065
2066 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
2067
2068 return ERROR_OK;
2069 }
2070
2071 /* poll current target status */
2072 int nds32_poll(struct target *target)
2073 {
2074 struct nds32 *nds32 = target_to_nds32(target);
2075 enum target_state state;
2076
2077 if (nds32_target_state(nds32, &state) != ERROR_OK)
2078 return ERROR_FAIL;
2079
2080 if (state == TARGET_HALTED) {
2081 if (target->state != TARGET_HALTED) {
2082 /* if false_hit, continue free_run */
2083 if (nds32->enter_debug_state(nds32, true) != ERROR_OK) {
2084 struct aice_port_s *aice = target_to_aice(target);
2085 aice_run(aice);
2086 return ERROR_OK;
2087 }
2088
2089 LOG_DEBUG("Change target state to TARGET_HALTED.");
2090
2091 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
2092 }
2093 } else if (state == TARGET_RESET) {
2094 if (target->state == TARGET_HALTED) {
2095 /* similar to assert srst */
2096 register_cache_invalidate(nds32->core_cache);
2097 target->state = TARGET_RESET;
2098
2099 /* TODO: deassert srst */
2100 } else if (target->state == TARGET_RUNNING) {
2101 /* reset as running */
2102 LOG_WARNING("<-- TARGET WARNING! The debug target has been reset. -->");
2103 }
2104 } else {
2105 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
2106 LOG_DEBUG("Change target state to TARGET_RUNNING.");
2107 target->state = TARGET_RUNNING;
2108 target->debug_reason = DBG_REASON_NOTHALTED;
2109 }
2110 }
2111
2112 return ERROR_OK;
2113 }
2114
2115 int nds32_resume(struct target *target, int current,
2116 target_addr_t address, int handle_breakpoints, int debug_execution)
2117 {
2118 LOG_DEBUG("current %d address %08" TARGET_PRIxADDR
2119 " handle_breakpoints %d"
2120 " debug_execution %d",
2121 current, address, handle_breakpoints, debug_execution);
2122
2123 struct nds32 *nds32 = target_to_nds32(target);
2124
2125 if (target->state != TARGET_HALTED) {
2126 LOG_ERROR("Target not halted");
2127 return ERROR_TARGET_NOT_HALTED;
2128 }
2129
2130 address = nds32_nextpc(nds32, current, address);
2131
2132 LOG_DEBUG("RESUME PC %08" TARGET_PRIxADDR "%s", address, !current ? "!" : "");
2133
2134 if (!debug_execution)
2135 target_free_all_working_areas(target);
2136
2137 /* Disable HSS to prevent users from misusing hardware single-stepping */
2138 if (nds32_reach_max_interrupt_level(nds32) == false) {
2139 uint32_t value_ir0;
2140 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
2141 value_ir0 &= ~(0x1 << 11);
2142 nds32_set_mapped_reg(nds32, IR0, value_ir0);
2143 }
2144
2145 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
2146 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
2147
2148 if (nds32->virtual_hosting_ctrl_c == false) {
2149 struct aice_port_s *aice = target_to_aice(target);
2150 aice_run(aice);
2151 } else
2152 nds32->virtual_hosting_ctrl_c = false;
2153
2154 target->debug_reason = DBG_REASON_NOTHALTED;
2155 if (!debug_execution)
2156 target->state = TARGET_RUNNING;
2157 else
2158 target->state = TARGET_DEBUG_RUNNING;
2159
2160 LOG_DEBUG("target->state: %s",
2161 target_state_name(target));
2162
2163 return ERROR_OK;
2164 }
2165
2166 static int nds32_soft_reset_halt(struct target *target)
2167 {
2168 /* TODO: test it */
2169 struct nds32 *nds32 = target_to_nds32(target);
2170 struct aice_port_s *aice = target_to_aice(target);
2171
2172 aice_assert_srst(aice, AICE_SRST);
2173
2174 /* halt core and set pc to 0x0 */
2175 int retval = target_halt(target);
2176 if (retval != ERROR_OK)
2177 return retval;
2178
2179 /* start fetching from IVB */
2180 uint32_t value_ir3;
2181 nds32_get_mapped_reg(nds32, IR3, &value_ir3);
2182 nds32_set_mapped_reg(nds32, PC, value_ir3 & 0xFFFF0000);
2183
2184 return ERROR_OK;
2185 }
2186
2187 int nds32_assert_reset(struct target *target)
2188 {
2189 struct nds32 *nds32 = target_to_nds32(target);
2190 struct aice_port_s *aice = target_to_aice(target);
2191 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
2192
2193 /* TODO: apply hw reset signal in not examined state */
2194 if (!(target_was_examined(target))) {
2195 LOG_WARNING("Reset is not asserted because the target is not examined.");
2196 LOG_WARNING("Use a reset button or power cycle the target.");
2197 return ERROR_TARGET_NOT_EXAMINED;
2198 }
2199
2200 if (target->reset_halt) {
2201 if ((nds32->soft_reset_halt)
2202 || (nds32->edm.version < 0x51)
2203 || ((nds32->edm.version == 0x51)
2204 && (cpu_version->revision == 0x1C)
2205 && (cpu_version->cpu_id_family == 0xC)
2206 && (cpu_version->cpu_id_version == 0x0)))
2207 nds32_soft_reset_halt(target);
2208 else
2209 aice_assert_srst(aice, AICE_RESET_HOLD);
2210 } else {
2211 aice_assert_srst(aice, AICE_SRST);
2212 alive_sleep(nds32->boot_time);
2213 }
2214
2215 /* set passcode for secure MCU after core reset */
2216 nds32_login(nds32);
2217
2218 /* registers are now invalid */
2219 register_cache_invalidate(nds32->core_cache);
2220
2221 target->state = TARGET_RESET;
2222
2223 return ERROR_OK;
2224 }
2225
2226 static int nds32_gdb_attach(struct nds32 *nds32)
2227 {
2228 LOG_DEBUG("nds32_gdb_attach, target coreid: %" PRId32, nds32->target->coreid);
2229
2230 if (nds32->attached == false) {
2231
2232 if (nds32->keep_target_edm_ctl) {
2233 /* backup target EDM_CTL */
2234 struct aice_port_s *aice = target_to_aice(nds32->target);
2235 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &nds32->backup_edm_ctl);
2236 }
2237
2238 target_halt(nds32->target);
2239
2240 nds32->attached = true;
2241 }
2242
2243 return ERROR_OK;
2244 }
2245
2246 static int nds32_gdb_detach(struct nds32 *nds32)
2247 {
2248 LOG_DEBUG("nds32_gdb_detach");
2249 bool backup_virtual_hosting_setting;
2250
2251 if (nds32->attached) {
2252
2253 backup_virtual_hosting_setting = nds32->virtual_hosting;
2254 /* turn off virtual hosting before resume as gdb-detach */
2255 nds32->virtual_hosting = false;
2256 target_resume(nds32->target, 1, 0, 0, 0);
2257 nds32->virtual_hosting = backup_virtual_hosting_setting;
2258
2259 if (nds32->keep_target_edm_ctl) {
2260 /* restore target EDM_CTL */
2261 struct aice_port_s *aice = target_to_aice(nds32->target);
2262 aice_write_debug_reg(aice, NDS_EDM_SR_EDM_CTL, nds32->backup_edm_ctl);
2263 }
2264
2265 nds32->attached = false;
2266 }
2267
2268 return ERROR_OK;
2269 }
2270
2271 static int nds32_callback_event_handler(struct target *target,
2272 enum target_event event, void *priv)
2273 {
2274 int retval = ERROR_OK;
2275 int target_number = *(int *)priv;
2276
2277 if (target_number != target->target_number)
2278 return ERROR_OK;
2279
2280 struct nds32 *nds32 = target_to_nds32(target);
2281
2282 switch (event) {
2283 case TARGET_EVENT_GDB_ATTACH:
2284 retval = nds32_gdb_attach(nds32);
2285 break;
2286 case TARGET_EVENT_GDB_DETACH:
2287 retval = nds32_gdb_detach(nds32);
2288 break;
2289 default:
2290 break;
2291 }
2292
2293 return retval;
2294 }
2295
2296 int nds32_init(struct nds32 *nds32)
2297 {
2298 /* Initialize anything we can set up without talking to the target */
2299 nds32->memory.access_channel = NDS_MEMORY_ACC_CPU;
2300
2301 /* register event callback */
2302 target_register_event_callback(nds32_callback_event_handler,
2303 &(nds32->target->target_number));
2304
2305 return ERROR_OK;
2306 }
2307
2308 int nds32_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
2309 {
2310 /* fill syscall parameters into the file-I/O info */
2311 if (!fileio_info) {
2312 LOG_ERROR("Target has not initial file-I/O data structure");
2313 return ERROR_FAIL;
2314 }
2315
2316 struct nds32 *nds32 = target_to_nds32(target);
2317 uint32_t value_ir6;
2318 uint32_t syscall_id;
2319
2320 if (nds32->hit_syscall == false)
2321 return ERROR_FAIL;
2322
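/* The pending syscall ID is encoded in bits [30:16] of IR6; it selects the
 * parameter decoding below. */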
2323 nds32_get_mapped_reg(nds32, IR6, &value_ir6);
2324 syscall_id = (value_ir6 >> 16) & 0x7FFF;
2325 nds32->active_syscall_id = syscall_id;
2326
2327 LOG_DEBUG("hit syscall ID: 0x%" PRIx32, syscall_id);
2328
2329 /* free previous identifier storage */
2330 free(fileio_info->identifier);
2331 fileio_info->identifier = NULL;
2332
2333 uint32_t reg_r0, reg_r1, reg_r2;
2334 nds32_get_mapped_reg(nds32, R0, &reg_r0);
2335 nds32_get_mapped_reg(nds32, R1, &reg_r1);
2336 nds32_get_mapped_reg(nds32, R2, &reg_r2);
2337
2338 switch (syscall_id) {
2339 case NDS32_SYSCALL_EXIT:
2340 fileio_info->identifier = malloc(5);
2341 sprintf(fileio_info->identifier, "exit");
2342 fileio_info->param_1 = reg_r0;
2343 break;
2344 case NDS32_SYSCALL_OPEN:
2345 {
2346 uint8_t filename[256];
2347 fileio_info->identifier = malloc(5);
2348 sprintf(fileio_info->identifier, "open");
2349 fileio_info->param_1 = reg_r0;
2350 /* reserve fileio_info->param_2 for length of path */
2351 fileio_info->param_3 = reg_r1;
2352 fileio_info->param_4 = reg_r2;
2353
2354 target->type->read_buffer(target, reg_r0, 256, filename);
2355 fileio_info->param_2 = strlen((char *)filename);
2356 }
2357 break;
2358 case NDS32_SYSCALL_CLOSE:
2359 fileio_info->identifier = malloc(6);
2360 sprintf(fileio_info->identifier, "close");
2361 fileio_info->param_1 = reg_r0;
2362 break;
2363 case NDS32_SYSCALL_READ:
2364 fileio_info->identifier = malloc(5);
2365 sprintf(fileio_info->identifier, "read");
2366 fileio_info->param_1 = reg_r0;
2367 fileio_info->param_2 = reg_r1;
2368 fileio_info->param_3 = reg_r2;
2369 break;
2370 case NDS32_SYSCALL_WRITE:
2371 fileio_info->identifier = malloc(6);
2372 sprintf(fileio_info->identifier, "write");
2373 fileio_info->param_1 = reg_r0;
2374 fileio_info->param_2 = reg_r1;
2375 fileio_info->param_3 = reg_r2;
2376 break;
2377 case NDS32_SYSCALL_LSEEK:
2378 fileio_info->identifier = malloc(6);
2379 sprintf(fileio_info->identifier, "lseek");
2380 fileio_info->param_1 = reg_r0;
2381 fileio_info->param_2 = reg_r1;
2382 fileio_info->param_3 = reg_r2;
2383 break;
2384 case NDS32_SYSCALL_UNLINK:
2385 {
2386 uint8_t filename[256];
2387 fileio_info->identifier = malloc(7);
2388 sprintf(fileio_info->identifier, "unlink");
2389 fileio_info->param_1 = reg_r0;
2390 /* reserve fileio_info->param_2 for length of path */
2391
2392 target->type->read_buffer(target, reg_r0, 256, filename);
2393 fileio_info->param_2 = strlen((char *)filename);
2394 }
2395 break;
2396 case NDS32_SYSCALL_RENAME:
2397 {
2398 uint8_t filename[256];
2399 fileio_info->identifier = malloc(7);
2400 sprintf(fileio_info->identifier, "rename");
2401 fileio_info->param_1 = reg_r0;
2402 /* reserve fileio_info->param_2 for length of old path */
2403 fileio_info->param_3 = reg_r1;
2404 /* reserve fileio_info->param_4 for length of new path */
2405
2406 target->type->read_buffer(target, reg_r0, 256, filename);
2407 fileio_info->param_2 = strlen((char *)filename);
2408
2409 target->type->read_buffer(target, reg_r1, 256, filename);
2410 fileio_info->param_4 = strlen((char *)filename);
2411 }
2412 break;
2413 case NDS32_SYSCALL_FSTAT:
2414 fileio_info->identifier = malloc(6);
2415 sprintf(fileio_info->identifier, "fstat");
2416 fileio_info->param_1 = reg_r0;
2417 fileio_info->param_2 = reg_r1;
2418 break;
2419 case NDS32_SYSCALL_STAT:
2420 {
2421 uint8_t filename[256];
2422 fileio_info->identifier = malloc(5);
2423 sprintf(fileio_info->identifier, "stat");
2424 fileio_info->param_1 = reg_r0;
2425 /* reserve fileio_info->param_2 for length of old path */
2426 fileio_info->param_3 = reg_r1;
2427
2428 target->type->read_buffer(target, reg_r0, 256, filename);
2429 fileio_info->param_2 = strlen((char *)filename) + 1;
2430 }
2431 break;
2432 case NDS32_SYSCALL_GETTIMEOFDAY:
2433 fileio_info->identifier = malloc(13);
2434 sprintf(fileio_info->identifier, "gettimeofday");
2435 fileio_info->param_1 = reg_r0;
2436 fileio_info->param_2 = reg_r1;
2437 break;
2438 case NDS32_SYSCALL_ISATTY:
2439 fileio_info->identifier = malloc(7);
2440 sprintf(fileio_info->identifier, "isatty");
2441 fileio_info->param_1 = reg_r0;
2442 break;
2443 case NDS32_SYSCALL_SYSTEM:
2444 {
2445 uint8_t command[256];
2446 fileio_info->identifier = malloc(7);
2447 sprintf(fileio_info->identifier, "system");
2448 fileio_info->param_1 = reg_r0;
2449 /* reserve fileio_info->param_2 for length of old path */
2450
2451 target->type->read_buffer(target, reg_r0, 256, command);
2452 fileio_info->param_2 = strlen((char *)command);
2453 }
2454 break;
2455 case NDS32_SYSCALL_ERRNO:
2456 fileio_info->identifier = malloc(6);
2457 sprintf(fileio_info->identifier, "errno");
2458 nds32_set_mapped_reg(nds32, R0, nds32->virtual_hosting_errno);
2459 break;
2460 default:
2461 fileio_info->identifier = malloc(8);
2462 sprintf(fileio_info->identifier, "unknown");
2463 break;
2464 }
2465
2466 return ERROR_OK;
2467 }
2468
2469 int nds32_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
2470 {
2471 LOG_DEBUG("syscall return code: 0x%x, errno: 0x%x , ctrl_c: %s",
2472 retcode, fileio_errno, ctrl_c ? "true" : "false");
2473
2474 struct nds32 *nds32 = target_to_nds32(target);
2475
2476 nds32_set_mapped_reg(nds32, R0, (uint32_t)retcode);
2477
2478 nds32->virtual_hosting_errno = fileio_errno;
2479 nds32->virtual_hosting_ctrl_c = ctrl_c;
2480 nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;
2481
2482 return ERROR_OK;
2483 }
2484
2485 int nds32_profiling(struct target *target, uint32_t *samples,
2486 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2487 {
2488 /* sample $PC every 10 milliseconds */
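/* One sample every 10 ms gives seconds * 100 samples in total, capped below
 * by max_num_samples. */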
2489 uint32_t iteration = seconds * 100;
2490 struct aice_port_s *aice = target_to_aice(target);
2491 struct nds32 *nds32 = target_to_nds32(target);
2492
2493 /* REVISIT: can nds32 profile without halting? */
2494 if (target->state != TARGET_HALTED) {
2495 LOG_WARNING("target %s is not halted (profiling)", target->cmd_name);
2496 return ERROR_TARGET_NOT_HALTED;
2497 }
2498
2499 if (max_num_samples < iteration)
2500 iteration = max_num_samples;
2501
2502 int pc_regnum = nds32->register_map(nds32, PC);
2503 aice_profiling(aice, 10, iteration, pc_regnum, samples, num_samples);
2504
2505 register_cache_invalidate(nds32->core_cache);
2506
2507 return ERROR_OK;
2508 }
2509
2510 int nds32_gdb_fileio_write_memory(struct nds32 *nds32, uint32_t address,
2511 uint32_t size, const uint8_t *buffer)
2512 {
2513 if ((nds32->active_syscall_id == NDS32_SYSCALL_FSTAT) ||
2514 (nds32->active_syscall_id == NDS32_SYSCALL_STAT)) {
2515 /* If doing GDB file-I/O, target should convert 'struct stat'
2516 * from gdb-format to target-format */
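/* The GDB File-I/O protocol transfers 'struct stat' with big-endian,
 * fixed-width fields; the byte-by-byte copies below swap each field into the
 * target's little-endian layout and zero the st_spare* padding. */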
2517 uint8_t stat_buffer[NDS32_STRUCT_STAT_SIZE];
2518 /* st_dev 2 */
2519 stat_buffer[0] = buffer[3];
2520 stat_buffer[1] = buffer[2];
2521 /* st_ino 2 */
2522 stat_buffer[2] = buffer[7];
2523 stat_buffer[3] = buffer[6];
2524 /* st_mode 4 */
2525 stat_buffer[4] = buffer[11];
2526 stat_buffer[5] = buffer[10];
2527 stat_buffer[6] = buffer[9];
2528 stat_buffer[7] = buffer[8];
2529 /* st_nlink 2 */
2530 stat_buffer[8] = buffer[15];
2531 stat_buffer[9] = buffer[14];
2532 /* st_uid 2 */
2533 stat_buffer[10] = buffer[19];
2534 stat_buffer[11] = buffer[18];
2535 /* st_gid 2 */
2536 stat_buffer[12] = buffer[23];
2537 stat_buffer[13] = buffer[22];
2538 /* st_rdev 2 */
2539 stat_buffer[14] = buffer[27];
2540 stat_buffer[15] = buffer[26];
2541 /* st_size 4 */
2542 stat_buffer[16] = buffer[35];
2543 stat_buffer[17] = buffer[34];
2544 stat_buffer[18] = buffer[33];
2545 stat_buffer[19] = buffer[32];
2546 /* st_atime 4 */
2547 stat_buffer[20] = buffer[55];
2548 stat_buffer[21] = buffer[54];
2549 stat_buffer[22] = buffer[53];
2550 stat_buffer[23] = buffer[52];
2551 /* st_spare1 4 */
2552 stat_buffer[24] = 0;
2553 stat_buffer[25] = 0;
2554 stat_buffer[26] = 0;
2555 stat_buffer[27] = 0;
2556 /* st_mtime 4 */
2557 stat_buffer[28] = buffer[59];
2558 stat_buffer[29] = buffer[58];
2559 stat_buffer[30] = buffer[57];
2560 stat_buffer[31] = buffer[56];
2561 /* st_spare2 4 */
2562 stat_buffer[32] = 0;
2563 stat_buffer[33] = 0;
2564 stat_buffer[34] = 0;
2565 stat_buffer[35] = 0;
2566 /* st_ctime 4 */
2567 stat_buffer[36] = buffer[63];
2568 stat_buffer[37] = buffer[62];
2569 stat_buffer[38] = buffer[61];
2570 stat_buffer[39] = buffer[60];
2571 /* st_spare3 4 */
2572 stat_buffer[40] = 0;
2573 stat_buffer[41] = 0;
2574 stat_buffer[42] = 0;
2575 stat_buffer[43] = 0;
2576 /* st_blksize 4 */
2577 stat_buffer[44] = buffer[43];
2578 stat_buffer[45] = buffer[42];
2579 stat_buffer[46] = buffer[41];
2580 stat_buffer[47] = buffer[40];
2581 /* st_blocks 4 */
2582 stat_buffer[48] = buffer[51];
2583 stat_buffer[49] = buffer[50];
2584 stat_buffer[50] = buffer[49];
2585 stat_buffer[51] = buffer[48];
2586 /* st_spare4 8 */
2587 stat_buffer[52] = 0;
2588 stat_buffer[53] = 0;
2589 stat_buffer[54] = 0;
2590 stat_buffer[55] = 0;
2591 stat_buffer[56] = 0;
2592 stat_buffer[57] = 0;
2593 stat_buffer[58] = 0;
2594 stat_buffer[59] = 0;
2595
2596 return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_STAT_SIZE, stat_buffer);
2597 } else if (nds32->active_syscall_id == NDS32_SYSCALL_GETTIMEOFDAY) {
2598 /* If doing GDB file-I/O, target should convert 'struct timeval'
2599 * from gdb-format to target-format */
2600 uint8_t timeval_buffer[NDS32_STRUCT_TIMEVAL_SIZE];
2601 timeval_buffer[0] = buffer[3];
2602 timeval_buffer[1] = buffer[2];
2603 timeval_buffer[2] = buffer[1];
2604 timeval_buffer[3] = buffer[0];
2605 timeval_buffer[4] = buffer[11];
2606 timeval_buffer[5] = buffer[10];
2607 timeval_buffer[6] = buffer[9];
2608 timeval_buffer[7] = buffer[8];
2609
2610 return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_TIMEVAL_SIZE, timeval_buffer);
2611 }
2612
2613 return nds32_write_buffer(nds32->target, address, size, buffer);
2614 }
2615
2616 int nds32_reset_halt(struct nds32 *nds32)
2617 {
2618 LOG_INFO("reset halt as init");
2619
2620 struct aice_port_s *aice = target_to_aice(nds32->target);
2621 aice_assert_srst(aice, AICE_RESET_HOLD);
2622
2623 return ERROR_OK;
2624 }
