src/target/nds32.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2013 Andes Technology *
5 * Hsiangkai Wang <hkwang@andestech.com> *
6 ***************************************************************************/
7
8 #ifdef HAVE_CONFIG_H
9 #include "config.h"
10 #endif
11
12 #include <helper/log.h>
13 #include <helper/binarybuffer.h>
14 #include "nds32.h"
15 #include "nds32_aice.h"
16 #include "nds32_tlb.h"
17 #include "nds32_disassembler.h"
18
19 struct nds32_edm_operation nds32_edm_ops[NDS32_EDM_OPERATION_MAX_NUM];
20 uint32_t nds32_edm_ops_num;
21
22 const char *nds32_debug_type_name[11] = {
23 "SOFTWARE BREAK",
24 "SOFTWARE BREAK_16",
25 "HARDWARE BREAKPOINT",
26 "DATA ADDR WATCHPOINT PRECISE",
27 "DATA VALUE WATCHPOINT PRECISE",
28 "DATA VALUE WATCHPOINT IMPRECISE",
29 "DEBUG INTERRUPT",
30 "HARDWARE SINGLE STEP",
31 "DATA ADDR WATCHPOINT NEXT PRECISE",
32 "DATA VALUE WATCHPOINT NEXT PRECISE",
33 "LOAD STORE GLOBAL STOP",
34 };
35
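/* ILM/DLM size in bytes, indexed by the 4-bit size field of $mr6/$mr7
 * (bits [4:1]); encodings without an entry here read back as 0. */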
36 static const int nds32_lm_size_table[16] = {
37 4 * 1024,
38 8 * 1024,
39 16 * 1024,
40 32 * 1024,
41 64 * 1024,
42 128 * 1024,
43 256 * 1024,
44 512 * 1024,
45 1024 * 1024,
46 1 * 1024,
47 2 * 1024,
48 };
49
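/* Cache line size in bytes, indexed by the 3-bit line-size field of
 * $cr1/$cr2; an encoding of 0 means no cache line (cache not present). */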
50 static const int nds32_line_size_table[6] = {
51 0,
52 8,
53 16,
54 32,
55 64,
56 128,
57 };
58
59 static int nds32_get_core_reg(struct reg *reg)
60 {
61 int retval;
62 struct nds32_reg *reg_arch_info = reg->arch_info;
63 struct target *target = reg_arch_info->target;
64 struct nds32 *nds32 = target_to_nds32(target);
65 struct aice_port_s *aice = target_to_aice(target);
66
67 if (target->state != TARGET_HALTED) {
68 LOG_ERROR("Target not halted");
69 return ERROR_TARGET_NOT_HALTED;
70 }
71
72 if (reg->valid) {
73 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
74 LOG_DEBUG("reading register(cached) %" PRIi32 "(%s), value: 0x%8.8" PRIx32,
75 reg_arch_info->num, reg->name, val);
76 return ERROR_OK;
77 }
78
79 int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);
80
81 if (reg_arch_info->enable == false) {
82 buf_set_u32(reg_arch_info->value, 0, 32, NDS32_REGISTER_DISABLE);
83 retval = ERROR_FAIL;
84 } else {
85 uint32_t val = 0;
86 if ((nds32->fpu_enable == false)
87 && (nds32_reg_type(mapped_regnum) == NDS32_REG_TYPE_FPU)) {
88 retval = ERROR_OK;
89 } else if ((nds32->audio_enable == false)
90 && (nds32_reg_type(mapped_regnum) == NDS32_REG_TYPE_AUMR)) {
91 retval = ERROR_OK;
92 } else {
93 retval = aice_read_register(aice, mapped_regnum, &val);
94 }
95 buf_set_u32(reg_arch_info->value, 0, 32, val);
96
97 LOG_DEBUG("reading register %" PRIi32 "(%s), value: 0x%8.8" PRIx32,
98 reg_arch_info->num, reg->name, val);
99 }
100
101 if (retval == ERROR_OK) {
102 reg->valid = true;
103 reg->dirty = false;
104 }
105
106 return retval;
107 }
108
109 static int nds32_get_core_reg_64(struct reg *reg)
110 {
111 int retval;
112 struct nds32_reg *reg_arch_info = reg->arch_info;
113 struct target *target = reg_arch_info->target;
114 struct nds32 *nds32 = target_to_nds32(target);
115 struct aice_port_s *aice = target_to_aice(target);
116
117 if (target->state != TARGET_HALTED) {
118 LOG_ERROR("Target not halted");
119 return ERROR_TARGET_NOT_HALTED;
120 }
121
122 if (reg->valid)
123 return ERROR_OK;
124
125 if (reg_arch_info->enable == false) {
126 buf_set_u64(reg_arch_info->value, 0, 64, NDS32_REGISTER_DISABLE);
127 retval = ERROR_FAIL;
128 } else {
129 uint64_t val = 0;
130 if ((nds32->fpu_enable == false)
131 && ((reg_arch_info->num >= FD0) && (reg_arch_info->num <= FD31))) {
132 retval = ERROR_OK;
133 } else {
134 retval = aice_read_reg_64(aice, reg_arch_info->num, &val);
135 }
136 buf_set_u64(reg_arch_info->value, 0, 64, val);
137 }
138
139 if (retval == ERROR_OK) {
140 reg->valid = true;
141 reg->dirty = false;
142 }
143
144 return retval;
145 }
146
147 static int nds32_update_psw(struct nds32 *nds32)
148 {
149 uint32_t value_ir0;
150 struct aice_port_s *aice = target_to_aice(nds32->target);
151
152 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
153
154 /* Save data memory endianness */
155 if ((value_ir0 >> 5) & 0x1) {
156 nds32->data_endian = TARGET_BIG_ENDIAN;
157 aice_set_data_endian(aice, AICE_BIG_ENDIAN);
158 } else {
159 nds32->data_endian = TARGET_LITTLE_ENDIAN;
160 aice_set_data_endian(aice, AICE_LITTLE_ENDIAN);
161 }
162
163 /* Save translation status */
164 nds32->memory.address_translation = ((value_ir0 >> 7) & 0x1) ? true : false;
165
166 return ERROR_OK;
167 }
168
169 static int nds32_update_mmu_info(struct nds32 *nds32)
170 {
171 uint32_t value;
172
173 /* Update MMU control status */
174 nds32_get_mapped_reg(nds32, MR0, &value);
175 nds32->mmu_config.default_min_page_size = value & 0x1;
176 nds32->mmu_config.multiple_page_size_in_use = (value >> 10) & 0x1;
177
178 return ERROR_OK;
179 }
180
181 static int nds32_update_cache_info(struct nds32 *nds32)
182 {
183 uint32_t value;
184
185 if (nds32_get_mapped_reg(nds32, MR8, &value) == ERROR_OK) {
186 if (value & 0x1)
187 nds32->memory.icache.enable = true;
188 else
189 nds32->memory.icache.enable = false;
190
191 if (value & 0x2)
192 nds32->memory.dcache.enable = true;
193 else
194 nds32->memory.dcache.enable = false;
195 } else {
196 nds32->memory.icache.enable = false;
197 nds32->memory.dcache.enable = false;
198 }
199
200 return ERROR_OK;
201 }
202
203 static int nds32_update_lm_info(struct nds32 *nds32)
204 {
205 struct nds32_memory *memory = &(nds32->memory);
206 uint32_t value_mr6;
207 uint32_t value_mr7;
208
209 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
210 if (value_mr6 & 0x1)
211 memory->ilm_enable = true;
212 else
213 memory->ilm_enable = false;
214
215 if (memory->ilm_align_ver == 0) { /* 1MB aligned */
216 memory->ilm_start = value_mr6 & 0xFFF00000;
217 memory->ilm_end = memory->ilm_start + memory->ilm_size;
218 } else if (memory->ilm_align_ver == 1) { /* aligned to local memory size */
219 memory->ilm_start = value_mr6 & 0xFFFFFC00;
220 memory->ilm_end = memory->ilm_start + memory->ilm_size;
221 } else {
222 memory->ilm_start = -1;
223 memory->ilm_end = -1;
224 }
225
226 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
227 if (value_mr7 & 0x1)
228 memory->dlm_enable = true;
229 else
230 memory->dlm_enable = false;
231
232 if (memory->dlm_align_ver == 0) { /* 1MB aligned */
233 memory->dlm_start = value_mr7 & 0xFFF00000;
234 memory->dlm_end = memory->dlm_start + memory->dlm_size;
235 } else if (memory->dlm_align_ver == 1) { /* aligned to local memory size */
236 memory->dlm_start = value_mr7 & 0xFFFFFC00;
237 memory->dlm_end = memory->dlm_start + memory->dlm_size;
238 } else {
239 memory->dlm_start = -1;
240 memory->dlm_end = -1;
241 }
242
243 return ERROR_OK;
244 }
245
246 /**
247 * If the FPU/audio extension is disabled, accessing FPU/audio registers
248 * causes exceptions. So we need to check whether FPU/audio is enabled
249 * whenever the target halts. If it is disabled and users access FPU/audio
250 * registers, OpenOCD returns a fake value of 0 instead of accessing the
251 * registers through DIM.
252 */
253 static int nds32_check_extension(struct nds32 *nds32)
254 {
255 uint32_t value;
256
257 nds32_get_mapped_reg(nds32, FUCPR, &value);
258 if (value == NDS32_REGISTER_DISABLE) {
259 nds32->fpu_enable = false;
260 nds32->audio_enable = false;
261 return ERROR_OK;
262 }
263
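/* FUCPR bit 0 controls the FPU; bit 31 controls the audio extension */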
264 if (value & 0x1)
265 nds32->fpu_enable = true;
266 else
267 nds32->fpu_enable = false;
268
269 if (value & 0x80000000)
270 nds32->audio_enable = true;
271 else
272 nds32->audio_enable = false;
273
274 return ERROR_OK;
275 }
276
277 static int nds32_set_core_reg(struct reg *reg, uint8_t *buf)
278 {
279 struct nds32_reg *reg_arch_info = reg->arch_info;
280 struct target *target = reg_arch_info->target;
281 struct nds32 *nds32 = target_to_nds32(target);
282 struct aice_port_s *aice = target_to_aice(target);
283 uint32_t value = buf_get_u32(buf, 0, 32);
284
285 if (target->state != TARGET_HALTED) {
286 LOG_ERROR("Target not halted");
287 return ERROR_TARGET_NOT_HALTED;
288 }
289
290 int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);
291
292 /* ignore values that would generate an exception */
293 if (nds32_reg_exception(mapped_regnum, value))
294 return ERROR_OK;
295
296 LOG_DEBUG("writing register %" PRIi32 "(%s) with value 0x%8.8" PRIx32,
297 reg_arch_info->num, reg->name, value);
298
299 if ((nds32->fpu_enable == false) &&
300 (nds32_reg_type(mapped_regnum) == NDS32_REG_TYPE_FPU)) {
301
302 buf_set_u32(reg->value, 0, 32, 0);
303 } else if ((nds32->audio_enable == false) &&
304 (nds32_reg_type(mapped_regnum) == NDS32_REG_TYPE_AUMR)) {
305
306 buf_set_u32(reg->value, 0, 32, 0);
307 } else {
308 buf_set_u32(reg->value, 0, 32, value);
309 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
310 aice_write_register(aice, mapped_regnum, val);
311
312 /* After writing the register, read the value back from the target
313 * to avoid W1C (write-1-to-clear) inconsistency. */
314 aice_read_register(aice, mapped_regnum, &val);
315 buf_set_u32(reg_arch_info->value, 0, 32, val);
316 }
317
318 reg->valid = true;
319 reg->dirty = false;
320
321 /* update cached target state so the change takes effect right away */
322 if (mapped_regnum == IR0) {
323 nds32_update_psw(nds32);
324 } else if (mapped_regnum == MR0) {
325 nds32_update_mmu_info(nds32);
326 } else if ((mapped_regnum == MR6) || (mapped_regnum == MR7)) {
327 /* update lm information */
328 nds32_update_lm_info(nds32);
329 } else if (mapped_regnum == MR8) {
330 nds32_update_cache_info(nds32);
331 } else if (mapped_regnum == FUCPR) {
332 /* update audio/fpu setting */
333 nds32_check_extension(nds32);
334 }
335
336 return ERROR_OK;
337 }
338
339 static int nds32_set_core_reg_64(struct reg *reg, uint8_t *buf)
340 {
341 struct nds32_reg *reg_arch_info = reg->arch_info;
342 struct target *target = reg_arch_info->target;
343 struct nds32 *nds32 = target_to_nds32(target);
344 uint32_t low_part = buf_get_u32(buf, 0, 32);
345 uint32_t high_part = buf_get_u32(buf, 32, 32);
346
347 if (target->state != TARGET_HALTED) {
348 LOG_ERROR("Target not halted");
349 return ERROR_TARGET_NOT_HALTED;
350 }
351
352 if ((nds32->fpu_enable == false) &&
353 ((reg_arch_info->num >= FD0) && (reg_arch_info->num <= FD31))) {
354
355 buf_set_u32(reg->value, 0, 32, 0);
356 buf_set_u32(reg->value, 32, 32, 0);
357
358 reg->valid = true;
359 reg->dirty = false;
360 } else {
361 buf_set_u32(reg->value, 0, 32, low_part);
362 buf_set_u32(reg->value, 32, 32, high_part);
363
364 reg->valid = true;
365 reg->dirty = true;
366 }
367
368 return ERROR_OK;
369 }
370
371 static const struct reg_arch_type nds32_reg_access_type = {
372 .get = nds32_get_core_reg,
373 .set = nds32_set_core_reg,
374 };
375
376 static const struct reg_arch_type nds32_reg_access_type_64 = {
377 .get = nds32_get_core_reg_64,
378 .set = nds32_set_core_reg_64,
379 };
380
381 static struct reg_cache *nds32_build_reg_cache(struct target *target,
382 struct nds32 *nds32)
383 {
384 struct reg_cache *cache = calloc(sizeof(struct reg_cache), 1);
385 struct reg *reg_list = calloc(TOTAL_REG_NUM, sizeof(struct reg));
386 struct nds32_reg *reg_arch_info = calloc(TOTAL_REG_NUM, sizeof(struct nds32_reg));
387 int i;
388
389 if (!cache || !reg_list || !reg_arch_info) {
390 free(cache);
391 free(reg_list);
392 free(reg_arch_info);
393 return NULL;
394 }
395
396 cache->name = "Andes registers";
397 cache->next = NULL;
398 cache->reg_list = reg_list;
399 cache->num_regs = 0;
400
401 for (i = 0; i < TOTAL_REG_NUM; i++) {
402 reg_arch_info[i].num = i;
403 reg_arch_info[i].target = target;
404 reg_arch_info[i].nds32 = nds32;
405 reg_arch_info[i].enable = false;
406
407 reg_list[i].name = nds32_reg_simple_name(i);
408 reg_list[i].number = reg_arch_info[i].num;
409 reg_list[i].size = nds32_reg_size(i);
410 reg_list[i].arch_info = &reg_arch_info[i];
411
412 reg_list[i].reg_data_type = calloc(sizeof(struct reg_data_type), 1);
413
414 if (reg_arch_info[i].num >= FD0 && reg_arch_info[i].num <= FD31) {
415 reg_list[i].value = reg_arch_info[i].value;
416 reg_list[i].type = &nds32_reg_access_type_64;
417
418 reg_list[i].reg_data_type->type = REG_TYPE_IEEE_DOUBLE;
419 reg_list[i].reg_data_type->id = "ieee_double";
420 reg_list[i].group = "float";
421 } else {
422 reg_list[i].value = reg_arch_info[i].value;
423 reg_list[i].type = &nds32_reg_access_type;
424 reg_list[i].group = "general";
425
426 if ((reg_arch_info[i].num >= FS0) && (reg_arch_info[i].num <= FS31)) {
427 reg_list[i].reg_data_type->type = REG_TYPE_IEEE_SINGLE;
428 reg_list[i].reg_data_type->id = "ieee_single";
429 reg_list[i].group = "float";
430 } else if ((reg_arch_info[i].num == FPCSR) ||
431 (reg_arch_info[i].num == FPCFG)) {
432 reg_list[i].group = "float";
433 } else if ((reg_arch_info[i].num == R28) ||
434 (reg_arch_info[i].num == R29) ||
435 (reg_arch_info[i].num == R31)) {
436 reg_list[i].reg_data_type->type = REG_TYPE_DATA_PTR;
437 reg_list[i].reg_data_type->id = "data_ptr";
438 } else if ((reg_arch_info[i].num == R30) ||
439 (reg_arch_info[i].num == PC)) {
440 reg_list[i].reg_data_type->type = REG_TYPE_CODE_PTR;
441 reg_list[i].reg_data_type->id = "code_ptr";
442 } else {
443 reg_list[i].reg_data_type->type = REG_TYPE_UINT32;
444 reg_list[i].reg_data_type->id = "uint32";
445 }
446 }
447
448 if (reg_arch_info[i].num >= R16 && reg_arch_info[i].num <= R25)
449 reg_list[i].caller_save = true;
450 else
451 reg_list[i].caller_save = false;
452
453 reg_list[i].feature = malloc(sizeof(struct reg_feature));
454
455 if (reg_arch_info[i].num >= R0 && reg_arch_info[i].num <= IFC_LP)
456 reg_list[i].feature->name = "org.gnu.gdb.nds32.core";
457 else if (reg_arch_info[i].num >= CR0 && reg_arch_info[i].num <= SECUR0)
458 reg_list[i].feature->name = "org.gnu.gdb.nds32.system";
459 else if (reg_arch_info[i].num >= D0L24 && reg_arch_info[i].num <= CBE3)
460 reg_list[i].feature->name = "org.gnu.gdb.nds32.audio";
461 else if (reg_arch_info[i].num >= FPCSR && reg_arch_info[i].num <= FD31)
462 reg_list[i].feature->name = "org.gnu.gdb.nds32.fpu";
463
464 cache->num_regs++;
465 }
466
467 nds32->core_cache = cache;
468
469 return cache;
470 }
471
472 static int nds32_reg_cache_init(struct target *target, struct nds32 *nds32)
473 {
474 struct reg_cache *cache;
475
476 cache = nds32_build_reg_cache(target, nds32);
477 if (!cache)
478 return ERROR_FAIL;
479
480 *register_get_last_cache_p(&target->reg_cache) = cache;
481
482 return ERROR_OK;
483 }
484
485 static struct reg *nds32_reg_current(struct nds32 *nds32, unsigned regnum)
486 {
487 struct reg *r;
488
489 r = nds32->core_cache->reg_list + regnum;
490
491 return r;
492 }
493
494 int nds32_full_context(struct nds32 *nds32)
495 {
496 uint32_t value, value_ir0;
497
498 /* fetch $pc & $psw into the register cache */
499 nds32_get_mapped_reg(nds32, PC, &value);
500 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
501
502 nds32_update_psw(nds32);
503 nds32_update_mmu_info(nds32);
504 nds32_update_cache_info(nds32);
505 nds32_update_lm_info(nds32);
506
507 nds32_check_extension(nds32);
508
509 return ERROR_OK;
510 }
511
512 /* get register value internally */
513 int nds32_get_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t *value)
514 {
515 struct reg_cache *reg_cache = nds32->core_cache;
516 struct reg *r;
517
518 if (regnum >= reg_cache->num_regs)
519 return ERROR_FAIL;
520
521 r = nds32_reg_current(nds32, regnum);
522
523 if (r->type->get(r) != ERROR_OK)
524 return ERROR_FAIL;
525
526 *value = buf_get_u32(r->value, 0, 32);
527
528 return ERROR_OK;
529 }
530
531 /** set register internally */
532 int nds32_set_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t value)
533 {
534 struct reg_cache *reg_cache = nds32->core_cache;
535 struct reg *r;
536 uint8_t set_value[4];
537
538 if (regnum >= reg_cache->num_regs)
539 return ERROR_FAIL;
540
541 r = nds32_reg_current(nds32, regnum);
542
543 buf_set_u32(set_value, 0, 32, value);
544
545 return r->type->set(r, set_value);
546 }
547
548 /** get general register list */
549 static int nds32_get_general_reg_list(struct nds32 *nds32,
550 struct reg **reg_list[], int *reg_list_size)
551 {
552 struct reg *reg_current;
553 int i;
554 int current_idx;
555
556 /** freed in gdb_server.c */
557 *reg_list = malloc(sizeof(struct reg *) * (IFC_LP - R0 + 1));
558 current_idx = 0;
559
560 for (i = R0; i < IFC_LP + 1; i++) {
561 reg_current = nds32_reg_current(nds32, i);
562 if (((struct nds32_reg *)reg_current->arch_info)->enable) {
563 (*reg_list)[current_idx] = reg_current;
564 current_idx++;
565 }
566 }
567 *reg_list_size = current_idx;
568
569 return ERROR_OK;
570 }
571
572 /** get all register list */
573 static int nds32_get_all_reg_list(struct nds32 *nds32,
574 struct reg **reg_list[], int *reg_list_size)
575 {
576 struct reg_cache *reg_cache = nds32->core_cache;
577 struct reg *reg_current;
578 unsigned int i;
579
580 *reg_list_size = reg_cache->num_regs;
581
582 /** freed in gdb_server.c */
583 *reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));
584
585 for (i = 0; i < reg_cache->num_regs; i++) {
586 reg_current = nds32_reg_current(nds32, i);
587 reg_current->exist = ((struct nds32_reg *)
588 reg_current->arch_info)->enable;
589 (*reg_list)[i] = reg_current;
590 }
591
592 return ERROR_OK;
593 }
594
595 /** get register list for GDB, selected by register class */
596 int nds32_get_gdb_reg_list(struct target *target,
597 struct reg **reg_list[], int *reg_list_size,
598 enum target_register_class reg_class)
599 {
600 struct nds32 *nds32 = target_to_nds32(target);
601
602 switch (reg_class) {
603 case REG_CLASS_ALL:
604 return nds32_get_all_reg_list(nds32, reg_list, reg_list_size);
605 case REG_CLASS_GENERAL:
606 return nds32_get_general_reg_list(nds32, reg_list, reg_list_size);
607 default:
608 return ERROR_FAIL;
609 }
610
611 return ERROR_FAIL;
612 }
613
614 static int nds32_select_memory_mode(struct target *target, uint32_t address,
615 uint32_t length, uint32_t *end_address)
616 {
617 struct nds32 *nds32 = target_to_nds32(target);
618 struct aice_port_s *aice = target_to_aice(target);
619 struct nds32_memory *memory = &(nds32->memory);
620 struct nds32_edm *edm = &(nds32->edm);
621 uint32_t dlm_start, dlm_end;
622 uint32_t ilm_start, ilm_end;
623 uint32_t address_end = address + length;
624
625 /* init end_address */
626 *end_address = address_end;
627
628 if (memory->access_channel == NDS_MEMORY_ACC_CPU)
629 return ERROR_OK;
630
631 if (edm->access_control == false) {
632 LOG_DEBUG("EDM does not support ACC_CTL");
633 return ERROR_OK;
634 }
635
636 if (edm->direct_access_local_memory == false) {
637 LOG_DEBUG("EDM does not support DALM");
638 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
639 return ERROR_OK;
640 }
641
642 if (memory->mode != NDS_MEMORY_SELECT_AUTO) {
643 LOG_DEBUG("Memory mode is not AUTO");
644 return ERROR_OK;
645 }
646
647 /* set default mode */
648 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
649
650 if ((memory->ilm_base != 0) && (memory->ilm_enable == true)) {
651 ilm_start = memory->ilm_start;
652 ilm_end = memory->ilm_end;
653
654 /* case 1, address < ilm_start */
655 if (address < ilm_start) {
656 if (ilm_start < address_end) {
657 /* update end_address to split non-ILM from ILM */
658 *end_address = ilm_start;
659 }
660 /* MEM mode */
661 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
662 } else if ((ilm_start <= address) && (address < ilm_end)) {
663 /* case 2, ilm_start <= address < ilm_end */
664 if (ilm_end < address_end) {
665 /* update end_address to split non-ILM from ILM */
666 *end_address = ilm_end;
667 }
668 /* ILM mode */
669 aice_memory_mode(aice, NDS_MEMORY_SELECT_ILM);
670 } else { /* case 3, ilm_end <= address */
671 /* MEM mode */
672 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
673 }
674
675 return ERROR_OK;
676 } else {
677 LOG_DEBUG("ILM is not enabled");
678 }
679
680 if ((memory->dlm_base != 0) && (memory->dlm_enable == true)) {
681 dlm_start = memory->dlm_start;
682 dlm_end = memory->dlm_end;
683
684 /* case 1, address < dlm_start */
685 if (address < dlm_start) {
686 if (dlm_start < address_end) {
687 /* update end_address to split non-DLM from DLM */
688 *end_address = dlm_start;
689 }
690 /* MEM mode */
691 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
692 } else if ((dlm_start <= address) && (address < dlm_end)) {
693 /* case 2, dlm_start <= address < dlm_end */
694 if (dlm_end < address_end) {
695 /* update end_address to split non-DLM from DLM */
696 *end_address = dlm_end;
697 }
698 /* DLM mode */
699 aice_memory_mode(aice, NDS_MEMORY_SELECT_DLM);
700 } else { /* case 3, dlm_end <= address */
701 /* MEM mode */
702 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
703 }
704
705 return ERROR_OK;
706 } else {
707 LOG_DEBUG("DLM is not enabled");
708 }
709
710 return ERROR_OK;
711 }
712
713 int nds32_read_buffer(struct target *target, uint32_t address,
714 uint32_t size, uint8_t *buffer)
715 {
716 struct nds32 *nds32 = target_to_nds32(target);
717 struct nds32_memory *memory = &(nds32->memory);
718
719 if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
720 (target->state != TARGET_HALTED)) {
721 LOG_WARNING("target was not halted");
722 return ERROR_TARGET_NOT_HALTED;
723 }
724
725 LOG_DEBUG("READ BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
726 address,
727 size);
728
729 int retval = ERROR_OK;
730 struct aice_port_s *aice = target_to_aice(target);
731 uint32_t end_address;
732
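/* fast path: a single aligned halfword access */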
733 if (((address % 2) == 0) && (size == 2)) {
734 nds32_select_memory_mode(target, address, 2, &end_address);
735 return aice_read_mem_unit(aice, address, 2, 1, buffer);
736 }
737
738 /* handle unaligned head bytes */
739 if (address % 4) {
740 uint32_t unaligned = 4 - (address % 4);
741
742 if (unaligned > size)
743 unaligned = size;
744
745 nds32_select_memory_mode(target, address, unaligned, &end_address);
746 retval = aice_read_mem_unit(aice, address, 1, unaligned, buffer);
747 if (retval != ERROR_OK)
748 return retval;
749
750 buffer += unaligned;
751 address += unaligned;
752 size -= unaligned;
753 }
754
755 /* handle aligned words */
756 if (size >= 4) {
757 int aligned = size - (size % 4);
758 int read_len;
759
760 do {
761 nds32_select_memory_mode(target, address, aligned, &end_address);
762
763 read_len = end_address - address;
764
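/* use bulk access for transfers longer than 8 bytes, word accesses otherwise */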
765 if (read_len > 8)
766 retval = aice_read_mem_bulk(aice, address, read_len, buffer);
767 else
768 retval = aice_read_mem_unit(aice, address, 4, read_len / 4, buffer);
769
770 if (retval != ERROR_OK)
771 return retval;
772
773 buffer += read_len;
774 address += read_len;
775 size -= read_len;
776 aligned -= read_len;
777
778 } while (aligned != 0);
779 }
780
781 /* prevent byte access when possible (avoid AHB access limitations in some cases) */
782 if (size >= 2) {
783 int aligned = size - (size % 2);
784 nds32_select_memory_mode(target, address, aligned, &end_address);
785 retval = aice_read_mem_unit(aice, address, 2, aligned / 2, buffer);
786 if (retval != ERROR_OK)
787 return retval;
788
789 buffer += aligned;
790 address += aligned;
791 size -= aligned;
792 }
793 /* handle tail reads of less than 4 bytes */
794 if (size > 0) {
795 nds32_select_memory_mode(target, address, size, &end_address);
796 retval = aice_read_mem_unit(aice, address, 1, size, buffer);
797 if (retval != ERROR_OK)
798 return retval;
799 }
800
801 return ERROR_OK;
802 }
803
804 int nds32_read_memory(struct target *target, uint32_t address,
805 uint32_t size, uint32_t count, uint8_t *buffer)
806 {
807 struct aice_port_s *aice = target_to_aice(target);
808
809 return aice_read_mem_unit(aice, address, size, count, buffer);
810 }
811
812 int nds32_read_phys_memory(struct target *target, target_addr_t address,
813 uint32_t size, uint32_t count, uint8_t *buffer)
814 {
815 struct aice_port_s *aice = target_to_aice(target);
816 struct nds32 *nds32 = target_to_nds32(target);
817 struct nds32_memory *memory = &(nds32->memory);
818 enum nds_memory_access orig_channel;
819 int result;
820
821 /* switch to BUS access mode to skip MMU */
822 orig_channel = memory->access_channel;
823 memory->access_channel = NDS_MEMORY_ACC_BUS;
824 aice_memory_access(aice, memory->access_channel);
825
826 /* The input address is a physical address; no address translation is needed. */
827 result = aice_read_mem_unit(aice, address, size, count, buffer);
828
829 /* restore the original access mode */
830 memory->access_channel = orig_channel;
831 aice_memory_access(aice, memory->access_channel);
832
833 return result;
834 }
835
836 int nds32_write_buffer(struct target *target, uint32_t address,
837 uint32_t size, const uint8_t *buffer)
838 {
839 struct nds32 *nds32 = target_to_nds32(target);
840 struct nds32_memory *memory = &(nds32->memory);
841
842 if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
843 (target->state != TARGET_HALTED)) {
844 LOG_WARNING("target was not halted");
845 return ERROR_TARGET_NOT_HALTED;
846 }
847
848 LOG_DEBUG("WRITE BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
849 address,
850 size);
851
852 struct aice_port_s *aice = target_to_aice(target);
853 int retval = ERROR_OK;
854 uint32_t end_address;
855
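/* fast path: a single aligned halfword access */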
856 if (((address % 2) == 0) && (size == 2)) {
857 nds32_select_memory_mode(target, address, 2, &end_address);
858 return aice_write_mem_unit(aice, address, 2, 1, buffer);
859 }
860
861 /* handle unaligned head bytes */
862 if (address % 4) {
863 uint32_t unaligned = 4 - (address % 4);
864
865 if (unaligned > size)
866 unaligned = size;
867
868 nds32_select_memory_mode(target, address, unaligned, &end_address);
869 retval = aice_write_mem_unit(aice, address, 1, unaligned, buffer);
870 if (retval != ERROR_OK)
871 return retval;
872
873 buffer += unaligned;
874 address += unaligned;
875 size -= unaligned;
876 }
877
878 /* handle aligned words */
879 if (size >= 4) {
880 int aligned = size - (size % 4);
881 int write_len;
882
883 do {
884 nds32_select_memory_mode(target, address, aligned, &end_address);
885
886 write_len = end_address - address;
887 if (write_len > 8)
888 retval = aice_write_mem_bulk(aice, address, write_len, buffer);
889 else
890 retval = aice_write_mem_unit(aice, address, 4, write_len / 4, buffer);
891 if (retval != ERROR_OK)
892 return retval;
893
894 buffer += write_len;
895 address += write_len;
896 size -= write_len;
897 aligned -= write_len;
898
899 } while (aligned != 0);
900 }
901
902 /* handle tail writes of less than 4 bytes */
903 if (size > 0) {
904 nds32_select_memory_mode(target, address, size, &end_address);
905 retval = aice_write_mem_unit(aice, address, 1, size, buffer);
906 if (retval != ERROR_OK)
907 return retval;
908 }
909
910 return retval;
911 }
912
913 int nds32_write_memory(struct target *target, uint32_t address,
914 uint32_t size, uint32_t count, const uint8_t *buffer)
915 {
916 struct aice_port_s *aice = target_to_aice(target);
917
918 return aice_write_mem_unit(aice, address, size, count, buffer);
919 }
920
921 int nds32_write_phys_memory(struct target *target, target_addr_t address,
922 uint32_t size, uint32_t count, const uint8_t *buffer)
923 {
924 struct aice_port_s *aice = target_to_aice(target);
925 struct nds32 *nds32 = target_to_nds32(target);
926 struct nds32_memory *memory = &(nds32->memory);
927 enum nds_memory_access orig_channel;
928 int result;
929
930 /* switch to BUS access mode to skip MMU */
931 orig_channel = memory->access_channel;
932 memory->access_channel = NDS_MEMORY_ACC_BUS;
933 aice_memory_access(aice, memory->access_channel);
934
935 /* The input address is a physical address; no address translation is needed. */
936 result = aice_write_mem_unit(aice, address, size, count, buffer);
937
938 /* restore the original access mode */
939 memory->access_channel = orig_channel;
940 aice_memory_access(aice, memory->access_channel);
941
942 return result;
943 }
944
945 int nds32_mmu(struct target *target, int *enabled)
946 {
947 if (target->state != TARGET_HALTED) {
948 LOG_ERROR("%s: target not halted", __func__);
949 return ERROR_TARGET_INVALID;
950 }
951
952 struct nds32 *nds32 = target_to_nds32(target);
953 struct nds32_memory *memory = &(nds32->memory);
954 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
955
956 if ((mmu_config->memory_protection == 2) && (memory->address_translation == true))
957 *enabled = 1;
958 else
959 *enabled = 0;
960
961 return ERROR_OK;
962 }
963
964 int nds32_arch_state(struct target *target)
965 {
966 struct nds32 *nds32 = target_to_nds32(target);
967
968 if (nds32->common_magic != NDS32_COMMON_MAGIC) {
969 LOG_ERROR("BUG: called for a non-Andes target");
970 return ERROR_FAIL;
971 }
972
973 uint32_t value_pc, value_psw;
974
975 nds32_get_mapped_reg(nds32, PC, &value_pc);
976 nds32_get_mapped_reg(nds32, IR0, &value_psw);
977
978 LOG_USER("target halted due to %s\n"
979 "psw: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
980 debug_reason_name(target),
981 value_psw,
982 value_pc,
983 nds32->virtual_hosting ? ", virtual hosting" : "");
984
985 /* save pc value to pseudo register pc */
986 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
987 buf_set_u32(reg->value, 0, 32, value_pc);
988
989 return ERROR_OK;
990 }
991
992 static void nds32_init_must_have_registers(struct nds32 *nds32)
993 {
994 struct reg_cache *reg_cache = nds32->core_cache;
995
996 /** MUST have general registers */
997 ((struct nds32_reg *)reg_cache->reg_list[R0].arch_info)->enable = true;
998 ((struct nds32_reg *)reg_cache->reg_list[R1].arch_info)->enable = true;
999 ((struct nds32_reg *)reg_cache->reg_list[R2].arch_info)->enable = true;
1000 ((struct nds32_reg *)reg_cache->reg_list[R3].arch_info)->enable = true;
1001 ((struct nds32_reg *)reg_cache->reg_list[R4].arch_info)->enable = true;
1002 ((struct nds32_reg *)reg_cache->reg_list[R5].arch_info)->enable = true;
1003 ((struct nds32_reg *)reg_cache->reg_list[R6].arch_info)->enable = true;
1004 ((struct nds32_reg *)reg_cache->reg_list[R7].arch_info)->enable = true;
1005 ((struct nds32_reg *)reg_cache->reg_list[R8].arch_info)->enable = true;
1006 ((struct nds32_reg *)reg_cache->reg_list[R9].arch_info)->enable = true;
1007 ((struct nds32_reg *)reg_cache->reg_list[R10].arch_info)->enable = true;
1008 ((struct nds32_reg *)reg_cache->reg_list[R15].arch_info)->enable = true;
1009 ((struct nds32_reg *)reg_cache->reg_list[R28].arch_info)->enable = true;
1010 ((struct nds32_reg *)reg_cache->reg_list[R29].arch_info)->enable = true;
1011 ((struct nds32_reg *)reg_cache->reg_list[R30].arch_info)->enable = true;
1012 ((struct nds32_reg *)reg_cache->reg_list[R31].arch_info)->enable = true;
1013 ((struct nds32_reg *)reg_cache->reg_list[PC].arch_info)->enable = true;
1014
1015 /** MUST have configuration system registers */
1016 ((struct nds32_reg *)reg_cache->reg_list[CR0].arch_info)->enable = true;
1017 ((struct nds32_reg *)reg_cache->reg_list[CR1].arch_info)->enable = true;
1018 ((struct nds32_reg *)reg_cache->reg_list[CR2].arch_info)->enable = true;
1019 ((struct nds32_reg *)reg_cache->reg_list[CR3].arch_info)->enable = true;
1020 ((struct nds32_reg *)reg_cache->reg_list[CR4].arch_info)->enable = true;
1021
1022 /** MUST have interrupt system registers */
1023 ((struct nds32_reg *)reg_cache->reg_list[IR0].arch_info)->enable = true;
1024 ((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = true;
1025 ((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = true;
1026 ((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = true;
1027 ((struct nds32_reg *)reg_cache->reg_list[IR6].arch_info)->enable = true;
1028 ((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = true;
1029 ((struct nds32_reg *)reg_cache->reg_list[IR11].arch_info)->enable = true;
1030 ((struct nds32_reg *)reg_cache->reg_list[IR14].arch_info)->enable = true;
1031 ((struct nds32_reg *)reg_cache->reg_list[IR15].arch_info)->enable = true;
1032
1033 /** MUST have MMU system registers */
1034 ((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = true;
1035
1036 /** MUST have EDM system registers */
1037 ((struct nds32_reg *)reg_cache->reg_list[DR40].arch_info)->enable = true;
1038 ((struct nds32_reg *)reg_cache->reg_list[DR42].arch_info)->enable = true;
1039 }
1040
1041 static int nds32_init_memory_config(struct nds32 *nds32)
1042 {
1043 uint32_t value_cr1; /* ICM_CFG */
1044 uint32_t value_cr2; /* DCM_CFG */
1045 struct nds32_memory *memory = &(nds32->memory);
1046
1047 /* read $cr1 to init instruction memory information */
1048 nds32_get_mapped_reg(nds32, CR1, &value_cr1);
1049 memory->icache.set = value_cr1 & 0x7;
1050 memory->icache.way = (value_cr1 >> 3) & 0x7;
1051 memory->icache.line_size = (value_cr1 >> 6) & 0x7;
1052 memory->icache.lock_support = (value_cr1 >> 9) & 0x1;
1053
1054 memory->ilm_base = (value_cr1 >> 10) & 0x7;
1055 memory->ilm_align_ver = (value_cr1 >> 13) & 0x3;
1056
1057 /* read $cr2 to init data memory information */
1058 nds32_get_mapped_reg(nds32, CR2, &value_cr2);
1059 memory->dcache.set = value_cr2 & 0x7;
1060 memory->dcache.way = (value_cr2 >> 3) & 0x7;
1061 memory->dcache.line_size = (value_cr2 >> 6) & 0x7;
1062 memory->dcache.lock_support = (value_cr2 >> 9) & 0x1;
1063
1064 memory->dlm_base = (value_cr2 >> 10) & 0x7;
1065 memory->dlm_align_ver = (value_cr2 >> 13) & 0x3;
1066
1067 return ERROR_OK;
1068 }
1069
1070 static void nds32_init_config(struct nds32 *nds32)
1071 {
1072 uint32_t value_cr0;
1073 uint32_t value_cr3;
1074 uint32_t value_cr4;
1075 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
1076 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
1077 struct nds32_misc_config *misc_config = &(nds32->misc_config);
1078
1079 nds32_get_mapped_reg(nds32, CR0, &value_cr0);
1080 nds32_get_mapped_reg(nds32, CR3, &value_cr3);
1081 nds32_get_mapped_reg(nds32, CR4, &value_cr4);
1082
1083 /* config cpu version */
1084 cpu_version->performance_extension = value_cr0 & 0x1;
1085 cpu_version->_16bit_extension = (value_cr0 >> 1) & 0x1;
1086 cpu_version->performance_extension_2 = (value_cr0 >> 2) & 0x1;
1087 cpu_version->cop_fpu_extension = (value_cr0 >> 3) & 0x1;
1088 cpu_version->string_extension = (value_cr0 >> 4) & 0x1;
1089 cpu_version->revision = (value_cr0 >> 16) & 0xFF;
1090 cpu_version->cpu_id_family = (value_cr0 >> 24) & 0xF;
1091 cpu_version->cpu_id_version = (value_cr0 >> 28) & 0xF;
1092
1093 /* config MMU */
1094 mmu_config->memory_protection = value_cr3 & 0x3;
1095 mmu_config->memory_protection_version = (value_cr3 >> 2) & 0x1F;
1096 mmu_config->fully_associative_tlb = (value_cr3 >> 7) & 0x1;
1097 if (mmu_config->fully_associative_tlb) {
1098 mmu_config->tlb_size = (value_cr3 >> 8) & 0x7F;
1099 } else {
1100 mmu_config->tlb_ways = (value_cr3 >> 8) & 0x7;
1101 mmu_config->tlb_sets = (value_cr3 >> 11) & 0x7;
1102 }
1103 mmu_config->_8k_page_support = (value_cr3 >> 15) & 0x1;
1104 mmu_config->extra_page_size_support = (value_cr3 >> 16) & 0xFF;
1105 mmu_config->tlb_lock = (value_cr3 >> 24) & 0x1;
1106 mmu_config->hardware_page_table_walker = (value_cr3 >> 25) & 0x1;
1107 mmu_config->default_endian = (value_cr3 >> 26) & 0x1;
1108 mmu_config->partition_num = (value_cr3 >> 27) & 0x1;
1109 mmu_config->invisible_tlb = (value_cr3 >> 28) & 0x1;
1110 mmu_config->vlpt = (value_cr3 >> 29) & 0x1;
1111 mmu_config->ntme = (value_cr3 >> 30) & 0x1;
1112 mmu_config->drde = (value_cr3 >> 31) & 0x1;
1113
1114 /* config misc */
1115 misc_config->edm = value_cr4 & 0x1;
1116 misc_config->local_memory_dma = (value_cr4 >> 1) & 0x1;
1117 misc_config->performance_monitor = (value_cr4 >> 2) & 0x1;
1118 misc_config->high_speed_memory_port = (value_cr4 >> 3) & 0x1;
1119 misc_config->debug_tracer = (value_cr4 >> 4) & 0x1;
1120 misc_config->div_instruction = (value_cr4 >> 5) & 0x1;
1121 misc_config->mac_instruction = (value_cr4 >> 6) & 0x1;
1122 misc_config->audio_isa = (value_cr4 >> 7) & 0x3;
1123 misc_config->l2_cache = (value_cr4 >> 9) & 0x1;
1124 misc_config->reduce_register = (value_cr4 >> 10) & 0x1;
1125 misc_config->addr_24 = (value_cr4 >> 11) & 0x1;
1126 misc_config->interruption_level = (value_cr4 >> 12) & 0x1;
1127 misc_config->baseline_instruction = (value_cr4 >> 13) & 0x7;
1128 misc_config->no_dx_register = (value_cr4 >> 16) & 0x1;
1129 misc_config->implement_dependant_register = (value_cr4 >> 17) & 0x1;
1130 misc_config->implement_dependant_sr_encoding = (value_cr4 >> 18) & 0x1;
1131 misc_config->ifc = (value_cr4 >> 19) & 0x1;
1132 misc_config->mcu = (value_cr4 >> 20) & 0x1;
1133 misc_config->shadow = (value_cr4 >> 21) & 0x7;
1134 misc_config->ex9 = (value_cr4 >> 24) & 0x1;
1135
1136 nds32_init_memory_config(nds32);
1137 }
1138
1139 static int nds32_init_option_registers(struct nds32 *nds32)
1140 {
1141 struct reg_cache *reg_cache = nds32->core_cache;
1142 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
1143 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
1144 struct nds32_misc_config *misc_config = &(nds32->misc_config);
1145 struct nds32_memory *memory_config = &(nds32->memory);
1146
1147 bool no_cr5;
1148 bool mr10_exist;
1149 bool no_racr0;
1150
1151 if (((cpu_version->cpu_id_family == 0xC) || (cpu_version->cpu_id_family == 0xD)) &&
1152 ((cpu_version->revision & 0xFC) == 0)) {
1153 no_cr5 = true;
1154 mr10_exist = true;
1155 no_racr0 = true;
1156 } else {
1157 no_cr5 = false;
1158 mr10_exist = false;
1159 no_racr0 = false;
1160 }
1161
1162 if (misc_config->reduce_register == false) {
1163 ((struct nds32_reg *)reg_cache->reg_list[R11].arch_info)->enable = true;
1164 ((struct nds32_reg *)reg_cache->reg_list[R12].arch_info)->enable = true;
1165 ((struct nds32_reg *)reg_cache->reg_list[R13].arch_info)->enable = true;
1166 ((struct nds32_reg *)reg_cache->reg_list[R14].arch_info)->enable = true;
1167 ((struct nds32_reg *)reg_cache->reg_list[R16].arch_info)->enable = true;
1168 ((struct nds32_reg *)reg_cache->reg_list[R17].arch_info)->enable = true;
1169 ((struct nds32_reg *)reg_cache->reg_list[R18].arch_info)->enable = true;
1170 ((struct nds32_reg *)reg_cache->reg_list[R19].arch_info)->enable = true;
1171 ((struct nds32_reg *)reg_cache->reg_list[R20].arch_info)->enable = true;
1172 ((struct nds32_reg *)reg_cache->reg_list[R21].arch_info)->enable = true;
1173 ((struct nds32_reg *)reg_cache->reg_list[R22].arch_info)->enable = true;
1174 ((struct nds32_reg *)reg_cache->reg_list[R23].arch_info)->enable = true;
1175 ((struct nds32_reg *)reg_cache->reg_list[R24].arch_info)->enable = true;
1176 ((struct nds32_reg *)reg_cache->reg_list[R25].arch_info)->enable = true;
1177 ((struct nds32_reg *)reg_cache->reg_list[R26].arch_info)->enable = true;
1178 ((struct nds32_reg *)reg_cache->reg_list[R27].arch_info)->enable = true;
1179 }
1180
1181 if (misc_config->no_dx_register == false) {
1182 ((struct nds32_reg *)reg_cache->reg_list[D0LO].arch_info)->enable = true;
1183 ((struct nds32_reg *)reg_cache->reg_list[D0HI].arch_info)->enable = true;
1184 ((struct nds32_reg *)reg_cache->reg_list[D1LO].arch_info)->enable = true;
1185 ((struct nds32_reg *)reg_cache->reg_list[D1HI].arch_info)->enable = true;
1186 }
1187
1188 if (misc_config->ex9)
1189 ((struct nds32_reg *)reg_cache->reg_list[ITB].arch_info)->enable = true;
1190
1191 if (no_cr5 == false)
1192 ((struct nds32_reg *)reg_cache->reg_list[CR5].arch_info)->enable = true;
1193
1194 if (cpu_version->cop_fpu_extension) {
1195 ((struct nds32_reg *)reg_cache->reg_list[CR6].arch_info)->enable = true;
1196 ((struct nds32_reg *)reg_cache->reg_list[FPCSR].arch_info)->enable = true;
1197 ((struct nds32_reg *)reg_cache->reg_list[FPCFG].arch_info)->enable = true;
1198 }
1199
1200 if (mmu_config->memory_protection == 1) {
1201 /* Secure MPU has no IPC, IPSW, P_ITYPE */
1202 ((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = false;
1203 ((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = false;
1204 }
1205
1206 if (nds32->privilege_level != 0)
1207 ((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = false;
1208
1209 if (misc_config->mcu == true)
1210 ((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = false;
1211
1212 if (misc_config->interruption_level == false) {
1213 ((struct nds32_reg *)reg_cache->reg_list[IR2].arch_info)->enable = true;
1214 ((struct nds32_reg *)reg_cache->reg_list[IR5].arch_info)->enable = true;
1215 ((struct nds32_reg *)reg_cache->reg_list[IR10].arch_info)->enable = true;
1216 ((struct nds32_reg *)reg_cache->reg_list[IR12].arch_info)->enable = true;
1217 ((struct nds32_reg *)reg_cache->reg_list[IR13].arch_info)->enable = true;
1218
1219 /* Secure MPU has no IPC, IPSW, P_ITYPE */
1220 if (mmu_config->memory_protection != 1)
1221 ((struct nds32_reg *)reg_cache->reg_list[IR7].arch_info)->enable = true;
1222 }
1223
1224 if ((cpu_version->cpu_id_family == 0x9) ||
1225 (cpu_version->cpu_id_family == 0xA) ||
1226 (cpu_version->cpu_id_family == 0xC) ||
1227 (cpu_version->cpu_id_family == 0xD))
1228 ((struct nds32_reg *)reg_cache->reg_list[IR8].arch_info)->enable = true;
1229
1230 if (misc_config->shadow == 1) {
1231 ((struct nds32_reg *)reg_cache->reg_list[IR16].arch_info)->enable = true;
1232 ((struct nds32_reg *)reg_cache->reg_list[IR17].arch_info)->enable = true;
1233 }
1234
1235 if (misc_config->ifc)
1236 ((struct nds32_reg *)reg_cache->reg_list[IFC_LP].arch_info)->enable = true;
1237
1238 if (nds32->privilege_level != 0)
1239 ((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = false;
1240
1241 if (mmu_config->memory_protection == 1) {
1242 if (mmu_config->memory_protection_version == 24)
1243 ((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;
1244
1245 if (nds32->privilege_level == 0) {
1246 if ((mmu_config->memory_protection_version == 16) ||
1247 (mmu_config->memory_protection_version == 24)) {
1248 ((struct nds32_reg *)reg_cache->reg_list[MR11].arch_info)->enable = true;
1249 ((struct nds32_reg *)reg_cache->reg_list[SECUR0].arch_info)->enable = true;
1250 ((struct nds32_reg *)reg_cache->reg_list[IR20].arch_info)->enable = true;
1251 ((struct nds32_reg *)reg_cache->reg_list[IR22].arch_info)->enable = true;
1252 ((struct nds32_reg *)reg_cache->reg_list[IR24].arch_info)->enable = true;
1253 ((struct nds32_reg *)reg_cache->reg_list[IR30].arch_info)->enable = true;
1254
1255 if (misc_config->shadow == 1) {
1256 ((struct nds32_reg *)reg_cache->reg_list[IR21].arch_info)->enable = true;
1257 ((struct nds32_reg *)reg_cache->reg_list[IR23].arch_info)->enable = true;
1258 ((struct nds32_reg *)reg_cache->reg_list[IR25].arch_info)->enable = true;
1259 }
1260 }
1261 }
1262 } else if (mmu_config->memory_protection == 2) {
1263 ((struct nds32_reg *)reg_cache->reg_list[MR1].arch_info)->enable = true;
1264 ((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;
1265
1266 if ((cpu_version->cpu_id_family != 0xA) && (cpu_version->cpu_id_family != 0xC) &&
1267 (cpu_version->cpu_id_family != 0xD))
1268 ((struct nds32_reg *)reg_cache->reg_list[MR5].arch_info)->enable = true;
1269 }
1270
1271 if (mmu_config->memory_protection > 0) {
1272 ((struct nds32_reg *)reg_cache->reg_list[MR2].arch_info)->enable = true;
1273 ((struct nds32_reg *)reg_cache->reg_list[MR3].arch_info)->enable = true;
1274 }
1275
1276 if (memory_config->ilm_base != 0)
1277 if (nds32->privilege_level == 0)
1278 ((struct nds32_reg *)reg_cache->reg_list[MR6].arch_info)->enable = true;
1279
1280 if (memory_config->dlm_base != 0)
1281 if (nds32->privilege_level == 0)
1282 ((struct nds32_reg *)reg_cache->reg_list[MR7].arch_info)->enable = true;
1283
1284 if ((memory_config->icache.line_size != 0) && (memory_config->dcache.line_size != 0))
1285 ((struct nds32_reg *)reg_cache->reg_list[MR8].arch_info)->enable = true;
1286
1287 if (misc_config->high_speed_memory_port)
1288 ((struct nds32_reg *)reg_cache->reg_list[MR9].arch_info)->enable = true;
1289
1290 if (mr10_exist)
1291 ((struct nds32_reg *)reg_cache->reg_list[MR10].arch_info)->enable = true;
1292
1293 if (misc_config->edm) {
1294 int dr_reg_n = nds32->edm.breakpoint_num * 5;
1295
1296 for (int i = 0 ; i < dr_reg_n ; i++)
1297 ((struct nds32_reg *)reg_cache->reg_list[DR0 + i].arch_info)->enable = true;
1298
1299 ((struct nds32_reg *)reg_cache->reg_list[DR41].arch_info)->enable = true;
1300 ((struct nds32_reg *)reg_cache->reg_list[DR43].arch_info)->enable = true;
1301 ((struct nds32_reg *)reg_cache->reg_list[DR44].arch_info)->enable = true;
1302 ((struct nds32_reg *)reg_cache->reg_list[DR45].arch_info)->enable = true;
1303 }
1304
1305 if (misc_config->debug_tracer) {
1306 ((struct nds32_reg *)reg_cache->reg_list[DR46].arch_info)->enable = true;
1307 ((struct nds32_reg *)reg_cache->reg_list[DR47].arch_info)->enable = true;
1308 }
1309
1310 if (misc_config->performance_monitor) {
1311 ((struct nds32_reg *)reg_cache->reg_list[PFR0].arch_info)->enable = true;
1312 ((struct nds32_reg *)reg_cache->reg_list[PFR1].arch_info)->enable = true;
1313 ((struct nds32_reg *)reg_cache->reg_list[PFR2].arch_info)->enable = true;
1314 ((struct nds32_reg *)reg_cache->reg_list[PFR3].arch_info)->enable = true;
1315 }
1316
1317 if (misc_config->local_memory_dma) {
1318 ((struct nds32_reg *)reg_cache->reg_list[DMAR0].arch_info)->enable = true;
1319 ((struct nds32_reg *)reg_cache->reg_list[DMAR1].arch_info)->enable = true;
1320 ((struct nds32_reg *)reg_cache->reg_list[DMAR2].arch_info)->enable = true;
1321 ((struct nds32_reg *)reg_cache->reg_list[DMAR3].arch_info)->enable = true;
1322 ((struct nds32_reg *)reg_cache->reg_list[DMAR4].arch_info)->enable = true;
1323 ((struct nds32_reg *)reg_cache->reg_list[DMAR5].arch_info)->enable = true;
1324 ((struct nds32_reg *)reg_cache->reg_list[DMAR6].arch_info)->enable = true;
1325 ((struct nds32_reg *)reg_cache->reg_list[DMAR7].arch_info)->enable = true;
1326 ((struct nds32_reg *)reg_cache->reg_list[DMAR8].arch_info)->enable = true;
1327 ((struct nds32_reg *)reg_cache->reg_list[DMAR9].arch_info)->enable = true;
1328 ((struct nds32_reg *)reg_cache->reg_list[DMAR10].arch_info)->enable = true;
1329 }
1330
1331 if ((misc_config->local_memory_dma || misc_config->performance_monitor) &&
1332 (no_racr0 == false))
1333 ((struct nds32_reg *)reg_cache->reg_list[RACR].arch_info)->enable = true;
1334
1335 if (cpu_version->cop_fpu_extension || (misc_config->audio_isa != 0))
1336 ((struct nds32_reg *)reg_cache->reg_list[FUCPR].arch_info)->enable = true;
1337
1338 if (misc_config->audio_isa != 0) {
1339 if (misc_config->audio_isa > 1) {
1340 ((struct nds32_reg *)reg_cache->reg_list[D0L24].arch_info)->enable = true;
1341 ((struct nds32_reg *)reg_cache->reg_list[D1L24].arch_info)->enable = true;
1342 }
1343
1344 ((struct nds32_reg *)reg_cache->reg_list[I0].arch_info)->enable = true;
1345 ((struct nds32_reg *)reg_cache->reg_list[I1].arch_info)->enable = true;
1346 ((struct nds32_reg *)reg_cache->reg_list[I2].arch_info)->enable = true;
1347 ((struct nds32_reg *)reg_cache->reg_list[I3].arch_info)->enable = true;
1348 ((struct nds32_reg *)reg_cache->reg_list[I4].arch_info)->enable = true;
1349 ((struct nds32_reg *)reg_cache->reg_list[I5].arch_info)->enable = true;
1350 ((struct nds32_reg *)reg_cache->reg_list[I6].arch_info)->enable = true;
1351 ((struct nds32_reg *)reg_cache->reg_list[I7].arch_info)->enable = true;
1352 ((struct nds32_reg *)reg_cache->reg_list[M1].arch_info)->enable = true;
1353 ((struct nds32_reg *)reg_cache->reg_list[M2].arch_info)->enable = true;
1354 ((struct nds32_reg *)reg_cache->reg_list[M3].arch_info)->enable = true;
1355 ((struct nds32_reg *)reg_cache->reg_list[M5].arch_info)->enable = true;
1356 ((struct nds32_reg *)reg_cache->reg_list[M6].arch_info)->enable = true;
1357 ((struct nds32_reg *)reg_cache->reg_list[M7].arch_info)->enable = true;
1358 ((struct nds32_reg *)reg_cache->reg_list[MOD].arch_info)->enable = true;
1359 ((struct nds32_reg *)reg_cache->reg_list[LBE].arch_info)->enable = true;
1360 ((struct nds32_reg *)reg_cache->reg_list[LE].arch_info)->enable = true;
1361 ((struct nds32_reg *)reg_cache->reg_list[LC].arch_info)->enable = true;
1362 ((struct nds32_reg *)reg_cache->reg_list[ADM_VBASE].arch_info)->enable = true;
1363 ((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL0].arch_info)->enable = true;
1364 ((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL1].arch_info)->enable = true;
1365
1366 uint32_t value_mod;
1367 uint32_t fucpr_backup;
1368 /* temporarily enable the audio unit to read its configuration */
1369 nds32_get_mapped_reg(nds32, FUCPR, &fucpr_backup);
1370 if ((fucpr_backup & 0x80000000) == 0)
1371 nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup | 0x80000000);
1372 nds32_get_mapped_reg(nds32, MOD, &value_mod);
1373 /* restore the original FUCPR value */
1374 if ((fucpr_backup & 0x80000000) == 0)
1375 nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup);
1376
1377 if ((value_mod >> 6) & 0x1) {
1378 ((struct nds32_reg *)reg_cache->reg_list[CB_CTL].arch_info)->enable = true;
1379 ((struct nds32_reg *)reg_cache->reg_list[CBB0].arch_info)->enable = true;
1380 ((struct nds32_reg *)reg_cache->reg_list[CBB1].arch_info)->enable = true;
1381 ((struct nds32_reg *)reg_cache->reg_list[CBB2].arch_info)->enable = true;
1382 ((struct nds32_reg *)reg_cache->reg_list[CBB3].arch_info)->enable = true;
1383 ((struct nds32_reg *)reg_cache->reg_list[CBE0].arch_info)->enable = true;
1384 ((struct nds32_reg *)reg_cache->reg_list[CBE1].arch_info)->enable = true;
1385 ((struct nds32_reg *)reg_cache->reg_list[CBE2].arch_info)->enable = true;
1386 ((struct nds32_reg *)reg_cache->reg_list[CBE3].arch_info)->enable = true;
1387 }
1388 }
1389
1390 if ((cpu_version->cpu_id_family == 0x9) ||
1391 (cpu_version->cpu_id_family == 0xA) ||
1392 (cpu_version->cpu_id_family == 0xC)) {
1393
1394 ((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = true;
1395 ((struct nds32_reg *)reg_cache->reg_list[IDR1].arch_info)->enable = true;
1396
1397 if ((cpu_version->cpu_id_family == 0xC) && (cpu_version->revision == 0x0C))
1398 ((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = false;
1399 }
1400
1401 uint32_t ir3_value;
1402 uint32_t ivb_prog_pri_lvl;
1403 uint32_t ivb_ivic_ver;
1404
1405 nds32_get_mapped_reg(nds32, IR3, &ir3_value);
1406 ivb_prog_pri_lvl = ir3_value & 0x1;
1407 ivb_ivic_ver = (ir3_value >> 11) & 0x3;
1408
1409 if ((ivb_prog_pri_lvl == 1) || (ivb_ivic_ver >= 1)) {
1410 ((struct nds32_reg *)reg_cache->reg_list[IR18].arch_info)->enable = true;
1411 ((struct nds32_reg *)reg_cache->reg_list[IR19].arch_info)->enable = true;
1412 }
1413
1414 if (ivb_ivic_ver >= 1) {
1415 ((struct nds32_reg *)reg_cache->reg_list[IR26].arch_info)->enable = true;
1416 ((struct nds32_reg *)reg_cache->reg_list[IR27].arch_info)->enable = true;
1417 ((struct nds32_reg *)reg_cache->reg_list[IR28].arch_info)->enable = true;
1418 ((struct nds32_reg *)reg_cache->reg_list[IR29].arch_info)->enable = true;
1419 }
1420
1421 return ERROR_OK;
1422 }
1423
1424 int nds32_init_register_table(struct nds32 *nds32)
1425 {
1426 nds32_init_must_have_registers(nds32);
1427
1428 return ERROR_OK;
1429 }
1430
1431 int nds32_add_software_breakpoint(struct target *target,
1432 struct breakpoint *breakpoint)
1433 {
1434 uint32_t data;
1435 uint32_t check_data;
1436 uint32_t break_insn;
1437
1438 /* check the breakpoint size */
1439 target->type->read_buffer(target, breakpoint->address, 4, (uint8_t *)&data);
1440
1441 /* back up the original instruction;
1442 * instructions are stored big-endian, so the first byte holds bit 31 */
1443 if (*(char *)&data & 0x80) { /* 16-bit instruction */
1444 breakpoint->length = 2;
1445 break_insn = NDS32_BREAK_16;
1446 } else { /* 32-bit instruction */
1447 breakpoint->length = 4;
1448 break_insn = NDS32_BREAK_32;
1449 }
1450
1451 free(breakpoint->orig_instr);
1452
1453 breakpoint->orig_instr = malloc(breakpoint->length);
1454 memcpy(breakpoint->orig_instr, &data, breakpoint->length);
1455
1456 /* self-modifying code: patch in the break instruction */
1457 target->type->write_buffer(target, breakpoint->address, breakpoint->length, (const uint8_t *)&break_insn);
1458 /* write_back & invalidate dcache & invalidate icache */
1459 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1460
1461 /* read back to check */
1462 target->type->read_buffer(target, breakpoint->address, breakpoint->length, (uint8_t *)&check_data);
1463 if (memcmp(&check_data, &break_insn, breakpoint->length) == 0)
1464 return ERROR_OK;
1465
1466 return ERROR_FAIL;
1467 }
1468
1469 int nds32_remove_software_breakpoint(struct target *target,
1470 struct breakpoint *breakpoint)
1471 {
1472 uint32_t check_data;
1473 uint32_t break_insn;
1474
1475 if (breakpoint->length == 2)
1476 break_insn = NDS32_BREAK_16;
1477 else if (breakpoint->length == 4)
1478 break_insn = NDS32_BREAK_32;
1479 else
1480 return ERROR_FAIL;
1481
1482 target->type->read_buffer(target, breakpoint->address, breakpoint->length,
1483 (uint8_t *)&check_data);
1484
1485 /* bail out if the break instruction has been overwritten */
1486 if (memcmp(&check_data, &break_insn, breakpoint->length) != 0)
1487 return ERROR_FAIL;
1488
1489 /* self-modifying code: restore the original instruction */
1490 target->type->write_buffer(target, breakpoint->address, breakpoint->length,
1491 breakpoint->orig_instr);
1492
1493 /* write_back & invalidate dcache & invalidate icache */
1494 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1495
1496 return ERROR_OK;
1497 }
1498
1499 /**
1500 * Restore the processor context on an Andes target. The full processor
1501 * context is analyzed to see if any of the registers are dirty on this end, but
1502 * have a valid new value. If this is the case, the processor is changed to the
1503 * appropriate mode and the new register values are written out to the
1504 * processor. If there happens to be a dirty register with an invalid value, an
1505 * error will be logged.
1506 *
1507 * @param target Pointer to the Andes target to have its context restored
1508 * @return Error status if the target is not halted.
1509 */
1510 int nds32_restore_context(struct target *target)
1511 {
1512 struct nds32 *nds32 = target_to_nds32(target);
1513 struct aice_port_s *aice = target_to_aice(target);
1514 struct reg_cache *reg_cache = nds32->core_cache;
1515 struct reg *reg;
1516 struct nds32_reg *reg_arch_info;
1517 unsigned int i;
1518
1519 LOG_DEBUG("-");
1520
1521 if (target->state != TARGET_HALTED) {
1522 LOG_WARNING("target not halted");
1523 return ERROR_TARGET_NOT_HALTED;
1524 }
1525
1526 /* check if there are dirty registers */
1527 for (i = 0; i < reg_cache->num_regs; i++) {
1528 reg = &(reg_cache->reg_list[i]);
1529 if (reg->dirty == true) {
1530 if (reg->valid == true) {
1531
1532 LOG_DEBUG("examining dirty reg: %s", reg->name);
1533 LOG_DEBUG("writing register %d with value 0x%8.8" PRIx32,
1534 i, buf_get_u32(reg->value, 0, 32));
1535
1536 reg_arch_info = reg->arch_info;
1537 if (reg_arch_info->num >= FD0 && reg_arch_info->num <= FD31) {
1538 uint64_t val = buf_get_u64(reg_arch_info->value, 0, 64);
1539 aice_write_reg_64(aice, reg_arch_info->num, val);
1540 } else {
1541 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
1542 aice_write_register(aice, reg_arch_info->num, val);
1543 }
1544
1545 reg->valid = true;
1546 reg->dirty = false;
1547 }
1548 }
1549 }
1550
1551 return ERROR_OK;
1552 }
1553
1554 int nds32_edm_config(struct nds32 *nds32)
1555 {
1556 struct target *target = nds32->target;
1557 struct aice_port_s *aice = target_to_aice(target);
1558 uint32_t edm_cfg;
1559 uint32_t edm_ctl;
1560
1561 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CFG, &edm_cfg);
1562
1563 nds32->edm.version = (edm_cfg >> 16) & 0xFFFF;
1564 LOG_INFO("EDM version 0x%04x", nds32->edm.version);
1565
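/* EDM_CFG[2:0] holds the number of hardware breakpoints minus one */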
1566 nds32->edm.breakpoint_num = (edm_cfg & 0x7) + 1;
1567
1568 if ((nds32->edm.version & 0x1000) || (nds32->edm.version >= 0x60))
1569 nds32->edm.access_control = true;
1570 else
1571 nds32->edm.access_control = false;
1572
1573 if ((edm_cfg >> 4) & 0x1)
1574 nds32->edm.direct_access_local_memory = true;
1575 else
1576 nds32->edm.direct_access_local_memory = false;
1577
1578 if (nds32->edm.version <= 0x20)
1579 nds32->edm.direct_access_local_memory = false;
1580
1581 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &edm_ctl);
1582 if (edm_ctl & (0x1 << 29))
1583 nds32->edm.support_max_stop = true;
1584 else
1585 nds32->edm.support_max_stop = false;
1586
1587 /* set passcode for secure MCU */
1588 nds32_login(nds32);
1589
1590 return ERROR_OK;
1591 }
1592
1593 int nds32_config(struct nds32 *nds32)
1594 {
1595 nds32_init_config(nds32);
1596
1597 /* init optional system registers according to config registers */
1598 nds32_init_option_registers(nds32);
1599
1600 /* get max interrupt level */
1601 if (nds32->misc_config.interruption_level)
1602 nds32->max_interrupt_level = 2;
1603 else
1604 nds32->max_interrupt_level = 3;
1605
1606 /* get ILM/DLM size from MR6/MR7 */
1607 uint32_t value_mr6, value_mr7;
1608 uint32_t size_index;
1609 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
1610 size_index = (value_mr6 >> 1) & 0xF;
1611 nds32->memory.ilm_size = nds32_lm_size_table[size_index];
1612
1613 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
1614 size_index = (value_mr7 >> 1) & 0xF;
1615 nds32->memory.dlm_size = nds32_lm_size_table[size_index];
1616
1617 return ERROR_OK;
1618 }
1619
1620 int nds32_init_arch_info(struct target *target, struct nds32 *nds32)
1621 {
1622 target->arch_info = nds32;
1623 nds32->target = target;
1624
1625 nds32->common_magic = NDS32_COMMON_MAGIC;
1626 nds32->init_arch_info_after_halted = false;
1627 nds32->auto_convert_hw_bp = true;
1628 nds32->global_stop = false;
1629 nds32->soft_reset_halt = false;
1630 nds32->edm_passcode = NULL;
1631 nds32->privilege_level = 0;
1632 nds32->boot_time = 1500;
1633 nds32->reset_halt_as_examine = false;
1634 nds32->keep_target_edm_ctl = false;
1635 nds32->word_access_mem = false;
1636 nds32->virtual_hosting = true;
1637 nds32->hit_syscall = false;
1638 nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;
1639 nds32->virtual_hosting_errno = 0;
1640 nds32->virtual_hosting_ctrl_c = false;
1641 nds32->attached = false;
1642
1643 nds32->syscall_break.asid = 0;
1644 nds32->syscall_break.length = 4;
1645 nds32->syscall_break.is_set = false;
1646 nds32->syscall_break.orig_instr = NULL;
1647 nds32->syscall_break.next = NULL;
1648 nds32->syscall_break.unique_id = 0x515CAll + target->target_number;
1649 nds32->syscall_break.linked_brp = 0;
1650
1651 nds32_reg_init();
1652
1653 if (nds32_reg_cache_init(target, nds32) == ERROR_FAIL)
1654 return ERROR_FAIL;
1655
1656 if (nds32_init_register_table(nds32) != ERROR_OK)
1657 return ERROR_FAIL;
1658
1659 return ERROR_OK;
1660 }
1661
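/**
 * Translate a virtual address to a physical one. When address translation is
 * disabled the address is returned unchanged; otherwise the hardware TLB is
 * probed first and, if that misses, the page table is walked in software.
 *
 * @return ERROR_FAIL if no translation could be found.
 */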
1662 int nds32_virtual_to_physical(struct target *target, target_addr_t address, target_addr_t *physical)
1663 {
1664 struct nds32 *nds32 = target_to_nds32(target);
1665
1666 if (nds32->memory.address_translation == false) {
1667 *physical = address;
1668 return ERROR_OK;
1669 }
1670
1671 if (nds32_probe_tlb(nds32, address, physical) == ERROR_OK)
1672 return ERROR_OK;
1673
1674 if (nds32_walk_page_table(nds32, address, physical) == ERROR_OK)
1675 return ERROR_OK;
1676
1677 return ERROR_FAIL;
1678 }
1679
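/**
 * Make the instruction cache coherent with memory that was modified behind its
 * back (typically after writing code or breakpoints into target memory): every
 * D-cache line covering [address, address + length) is written back and
 * invalidated, then the corresponding I-cache lines are invalidated. The
 * cached line_size field is an encoded value; nds32_line_size_table[line_size]
 * yields the line size in bytes, and, as the in-line comments below note, the
 * shift by (line_size + 2) is the same as dividing by that line size, i.e. the
 * line size in bytes is 2^(line_size + 2).
 */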
1680 int nds32_cache_sync(struct target *target, target_addr_t address, uint32_t length)
1681 {
1682 struct aice_port_s *aice = target_to_aice(target);
1683 struct nds32 *nds32 = target_to_nds32(target);
1684 struct nds32_cache *dcache = &(nds32->memory.dcache);
1685 struct nds32_cache *icache = &(nds32->memory.icache);
1686 uint32_t dcache_line_size = nds32_line_size_table[dcache->line_size];
1687 uint32_t icache_line_size = nds32_line_size_table[icache->line_size];
1688 uint32_t cur_address;
1689 int result;
1690 uint32_t start_line, end_line;
1691 uint32_t cur_line;
1692
1693 if ((dcache->line_size != 0) && (dcache->enable == true)) {
1694 /* address / dcache_line_size */
1695 start_line = address >> (dcache->line_size + 2);
1696 /* (address + length - 1) / dcache_line_size */
1697 end_line = (address + length - 1) >> (dcache->line_size + 2);
1698
1699 for (cur_address = address, cur_line = start_line;
1700 cur_line <= end_line;
1701 cur_address += dcache_line_size, cur_line++) {
1702 /* D$ write back */
1703 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_WB, cur_address);
1704 if (result != ERROR_OK)
1705 return result;
1706
1707 /* D$ invalidate */
1708 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_INVAL, cur_address);
1709 if (result != ERROR_OK)
1710 return result;
1711 }
1712 }
1713
1714 if ((icache->line_size != 0) && (icache->enable == true)) {
1715 /* address / icache_line_size */
1716 start_line = address >> (icache->line_size + 2);
1717 /* (address + length - 1) / icache_line_size */
1718 end_line = (address + length - 1) >> (icache->line_size + 2);
1719
1720 for (cur_address = address, cur_line = start_line;
1721 cur_line <= end_line;
1722 cur_address += icache_line_size, cur_line++) {
1723 /* Because PSW.IT is turned off under debug exception, address MUST
1724 * be a physical address. L1I_VA_INVALIDATE uses PSW.IT to decide
1725 * whether to translate the address. */
1726 target_addr_t physical_addr;
1727 if (target->type->virt2phys(target, cur_address, &physical_addr) == ERROR_FAIL)
1728 return ERROR_FAIL;
1729
1730 /* I$ invalidate */
1731 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1I_VA_INVAL, physical_addr);
1732 if (result != ERROR_OK)
1733 return result;
1734 }
1735 }
1736
1737 return ERROR_OK;
1738 }
1739
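/**
 * Resolve the PC to resume or step from: with current == 0 the supplied
 * address is written into PC, otherwise the current PC is read back and
 * returned so the caller can log it and continue from it.
 */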
1740 uint32_t nds32_nextpc(struct nds32 *nds32, int current, uint32_t address)
1741 {
1742 if (!current)
1743 nds32_set_mapped_reg(nds32, PC, address);
1744 else
1745 nds32_get_mapped_reg(nds32, PC, &address);
1746
1747 return address;
1748 }
1749
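/**
 * Single-step the target. Bit 31 of IR14 (the "DSSIM" setting referenced in
 * the comments below) is set or cleared according to nds32->step_isr_enable
 * before the step, so that stepping into interrupt handlers can be allowed or
 * suppressed, and is restored afterwards. If the halt was caused by a
 * virtual-hosting syscall, the explicit step is skipped because
 * leave_debug_state() already steps implicitly over the syscall instruction.
 */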
1750 int nds32_step(struct target *target, int current,
1751 target_addr_t address, int handle_breakpoints)
1752 {
1753 LOG_DEBUG("target->state: %s",
1754 target_state_name(target));
1755
1756 if (target->state != TARGET_HALTED) {
1757 LOG_WARNING("target was not halted");
1758 return ERROR_TARGET_NOT_HALTED;
1759 }
1760
1761 struct nds32 *nds32 = target_to_nds32(target);
1762
1763 address = nds32_nextpc(nds32, current, address);
1764
1765 LOG_DEBUG("STEP PC %08" TARGET_PRIxADDR "%s", address, !current ? "!" : "");
1766
1767 /** set DSSIM */
1768 uint32_t ir14_value;
1769 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1770 if (nds32->step_isr_enable)
1771 ir14_value |= (0x1 << 31);
1772 else
1773 ir14_value &= ~(0x1 << 31);
1774 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1775
1776 /* check hit_syscall before leave_debug_state() because
1777 * leave_debug_state() may clear hit_syscall flag */
1778 bool no_step = false;
1779 if (nds32->hit_syscall)
1780 /* a step right after hit_syscall is ignored because
1781 * leave_debug_state() already steps implicitly to skip
1782 * the syscall */
1783 no_step = true;
1784
1785 /********* TODO: maybe create another function to handle this part */
1786 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
1787 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
1788
1789 if (no_step == false) {
1790 struct aice_port_s *aice = target_to_aice(target);
1791 if (aice_step(aice) != ERROR_OK)
1792 return ERROR_FAIL;
1793 }
1794
1795 /* save state */
1796 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
1797 /********* TODO: maybe create another function to handle this part */
1798
1799 /* restore DSSIM */
1800 if (nds32->step_isr_enable) {
1801 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1802 ir14_value &= ~(0x1 << 31);
1803 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1804 }
1805
1806 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
1807
1808 return ERROR_OK;
1809 }
1810
1811 static int nds32_step_without_watchpoint(struct nds32 *nds32)
1812 {
1813 struct target *target = nds32->target;
1814
1815 if (target->state != TARGET_HALTED) {
1816 LOG_WARNING("target was not halted");
1817 return ERROR_TARGET_NOT_HALTED;
1818 }
1819
1820 /** set DSSIM */
1821 uint32_t ir14_value;
1822 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1823 if (nds32->step_isr_enable)
1824 ir14_value |= (0x1 << 31);
1825 else
1826 ir14_value &= ~(0x1 << 31);
1827 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1828
1829 /********* TODO: maybe create another function to handle this part */
1830 CHECK_RETVAL(nds32->leave_debug_state(nds32, false));
1831
1832 struct aice_port_s *aice = target_to_aice(target);
1833
1834 if (aice_step(aice) != ERROR_OK)
1835 return ERROR_FAIL;
1836
1837 /* save state */
1838 CHECK_RETVAL(nds32->enter_debug_state(nds32, false));
1839 /********* TODO: maybe create another function to handle this part */
1840
1841 /* restore DSSIM */
1842 if (nds32->step_isr_enable) {
1843 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1844 ir14_value &= ~(0x1 << 31);
1845 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1846 }
1847
1848 return ERROR_OK;
1849 }
1850
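/**
 * Query the AICE adapter for the core state and map it onto OpenOCD's
 * target_state values. A disconnected USB adapter or a detached target is
 * reported as a failure rather than as a state.
 */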
1851 int nds32_target_state(struct nds32 *nds32, enum target_state *state)
1852 {
1853 struct aice_port_s *aice = target_to_aice(nds32->target);
1854 enum aice_target_state_s nds32_state;
1855
1856 if (aice_state(aice, &nds32_state) != ERROR_OK)
1857 return ERROR_FAIL;
1858
1859 switch (nds32_state) {
1860 case AICE_DISCONNECT:
1861 LOG_INFO("USB is disconnected");
1862 return ERROR_FAIL;
1863 case AICE_TARGET_DETACH:
1864 LOG_INFO("Target is disconnected");
1865 return ERROR_FAIL;
1866 case AICE_TARGET_UNKNOWN:
1867 *state = TARGET_UNKNOWN;
1868 break;
1869 case AICE_TARGET_RUNNING:
1870 *state = TARGET_RUNNING;
1871 break;
1872 case AICE_TARGET_HALTED:
1873 *state = TARGET_HALTED;
1874 break;
1875 case AICE_TARGET_RESET:
1876 *state = TARGET_RESET;
1877 break;
1878 case AICE_TARGET_DEBUG_RUNNING:
1879 *state = TARGET_DEBUG_RUNNING;
1880 break;
1881 default:
1882 return ERROR_FAIL;
1883 }
1884
1885 return ERROR_OK;
1886 }
1887
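/**
 * Determine why the core stopped and record it in target->debug_reason.
 * For break/instruction-break reasons the instruction at PC is decoded to
 * tell a plain breakpoint apart from "break 0x7FFF", which is treated as a
 * program exit. For precise watchpoints the watched address is captured and a
 * single step (without watchpoints) is performed so the core moves past the
 * access that triggered the stop.
 */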
1888 int nds32_examine_debug_reason(struct nds32 *nds32)
1889 {
1890 uint32_t reason;
1891 struct target *target = nds32->target;
1892
1893 if (nds32->hit_syscall == true) {
1894 LOG_DEBUG("Hit syscall breakpoint");
1895 target->debug_reason = DBG_REASON_BREAKPOINT;
1896 return ERROR_OK;
1897 }
1898
1899 nds32->get_debug_reason(nds32, &reason);
1900
1901 LOG_DEBUG("nds32 examines debug reason: %s", nds32_debug_type_name[reason]);
1902
1903 /* Examine debug reason */
1904 switch (reason) {
1905 case NDS32_DEBUG_BREAK:
1906 case NDS32_DEBUG_BREAK_16:
1907 case NDS32_DEBUG_INST_BREAK:
1908 {
1909 uint32_t value_pc;
1910 uint32_t opcode;
1911 struct nds32_instruction instruction;
1912
1913 nds32_get_mapped_reg(nds32, PC, &value_pc);
1914
1915 if (nds32_read_opcode(nds32, value_pc, &opcode) != ERROR_OK)
1916 return ERROR_FAIL;
1917 if (nds32_evaluate_opcode(nds32, opcode, value_pc, &instruction) != ERROR_OK)
1918 return ERROR_FAIL;
1919
1920 /* hit 'break 0x7FFF' */
1921 if ((instruction.info.opc_6 == 0x32) &&
1922 (instruction.info.sub_opc == 0xA) &&
1923 (instruction.info.imm == 0x7FFF)) {
1924 target->debug_reason = DBG_REASON_EXIT;
1925 } else
1926 target->debug_reason = DBG_REASON_BREAKPOINT;
1927 }
1928 break;
1929 case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_PRECISE:
1930 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_PRECISE:
1931 case NDS32_DEBUG_LOAD_STORE_GLOBAL_STOP: /* GLOBAL_STOP is precise exception */
1932 {
1933 int result;
1934
1935 result = nds32->get_watched_address(nds32,
1936 &(nds32->watched_address), reason);
1937 /* do a single step (without watchpoints) to skip the "watched" instruction */
1938 nds32_step_without_watchpoint(nds32);
1939
1940 /* the watched (exception) address was saved above, before the single step */
1941 if (result != ERROR_OK)
1942 return ERROR_FAIL;
1943
1944 target->debug_reason = DBG_REASON_WATCHPOINT;
1945 }
1946 break;
1947 case NDS32_DEBUG_DEBUG_INTERRUPT:
1948 target->debug_reason = DBG_REASON_DBGRQ;
1949 break;
1950 case NDS32_DEBUG_HARDWARE_SINGLE_STEP:
1951 target->debug_reason = DBG_REASON_SINGLESTEP;
1952 break;
1953 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_IMPRECISE:
1954 case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_NEXT_PRECISE:
1955 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_NEXT_PRECISE:
1956 if (nds32->get_watched_address(nds32, &(nds32->watched_address), reason) != ERROR_OK)
1957 return ERROR_FAIL;
1958
1959 target->debug_reason = DBG_REASON_WATCHPOINT;
1960 break;
1961 default:
1962 target->debug_reason = DBG_REASON_UNDEFINED;
1963 break;
1964 }
1965
1966 return ERROR_OK;
1967 }
1968
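/**
 * Unlock a secure MCU by programming the EDM passcode, plus any extra EDM
 * operations queued in nds32_edm_ops[], through the AICE "program EDM"
 * interface. The passcode string is consumed eight hex digits at a time and
 * each chunk becomes one "write_misc gen_port0 0x...;" command; e.g. a
 * (hypothetical) passcode "0123456789abcdef" would be sent as
 * "write_misc gen_port0 0x1234567;write_misc gen_port0 0x89abcdef;".
 * Afterwards the current privilege level is read back from EDMSW.
 */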
1969 int nds32_login(struct nds32 *nds32)
1970 {
1971 struct target *target = nds32->target;
1972 struct aice_port_s *aice = target_to_aice(target);
1973 uint32_t passcode_length;
1974 char command_sequence[129];
1975 char command_str[33];
1976 char code_str[9];
1977 uint32_t copy_length;
1978 uint32_t code;
1979 uint32_t i;
1980
1981 LOG_DEBUG("nds32_login");
1982
1983 if (nds32->edm_passcode) {
1984 /* convert EDM passcode to command sequences */
1985 passcode_length = strlen(nds32->edm_passcode);
1986 command_sequence[0] = '\0';
1987 for (i = 0; i < passcode_length; i += 8) {
1988 if (passcode_length - i < 8)
1989 copy_length = passcode_length - i;
1990 else
1991 copy_length = 8;
1992
1993 strncpy(code_str, nds32->edm_passcode + i, copy_length);
1994 code_str[copy_length] = '\0';
1995 code = strtoul(code_str, NULL, 16);
1996
1997 sprintf(command_str, "write_misc gen_port0 0x%" PRIx32 ";", code);
1998 strcat(command_sequence, command_str);
1999 }
2000
2001 if (aice_program_edm(aice, command_sequence) != ERROR_OK)
2002 return ERROR_FAIL;
2003
2004 /* get current privilege level */
2005 uint32_t value_edmsw;
2006 aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &value_edmsw);
2007 nds32->privilege_level = (value_edmsw >> 16) & 0x3;
2008 LOG_INFO("Current privilege level: %d", nds32->privilege_level);
2009 }
2010
2011 if (nds32_edm_ops_num > 0) {
2012 const char *reg_name;
2013 for (i = 0 ; i < nds32_edm_ops_num ; i++) {
2014 code = nds32_edm_ops[i].value;
2015 if (nds32_edm_ops[i].reg_no == 6)
2016 reg_name = "gen_port0";
2017 else if (nds32_edm_ops[i].reg_no == 7)
2018 reg_name = "gen_port1";
2019 else
2020 return ERROR_FAIL;
2021
2022 sprintf(command_str, "write_misc %s 0x%" PRIx32 ";", reg_name, code);
2023 if (aice_program_edm(aice, command_str) != ERROR_OK)
2024 return ERROR_FAIL;
2025 }
2026 }
2027
2028 return ERROR_OK;
2029 }
2030
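/**
 * Halt the core through the AICE adapter (unless it already stopped by
 * itself), then collect the debug context via enter_debug_state() and notify
 * the halted event callbacks.
 */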
2031 int nds32_halt(struct target *target)
2032 {
2033 struct nds32 *nds32 = target_to_nds32(target);
2034 struct aice_port_s *aice = target_to_aice(target);
2035 enum target_state state;
2036
2037 LOG_DEBUG("target->state: %s",
2038 target_state_name(target));
2039
2040 if (target->state == TARGET_HALTED) {
2041 LOG_DEBUG("target was already halted");
2042 return ERROR_OK;
2043 }
2044
2045 if (nds32_target_state(nds32, &state) != ERROR_OK)
2046 return ERROR_FAIL;
2047
2048 if (state != TARGET_HALTED)
2049 /* TODO: if state == TARGET_HALTED, check ETYPE is DBGI or not */
2050 if (aice_halt(aice) != ERROR_OK)
2051 return ERROR_FAIL;
2052
2053 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
2054
2055 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
2056
2057 return ERROR_OK;
2058 }
2059
2060 /* poll current target status */
2061 int nds32_poll(struct target *target)
2062 {
2063 struct nds32 *nds32 = target_to_nds32(target);
2064 enum target_state state;
2065
2066 if (nds32_target_state(nds32, &state) != ERROR_OK)
2067 return ERROR_FAIL;
2068
2069 if (state == TARGET_HALTED) {
2070 if (target->state != TARGET_HALTED) {
2071 /* if this was a false hit, let the core continue free-running */
2072 if (nds32->enter_debug_state(nds32, true) != ERROR_OK) {
2073 struct aice_port_s *aice = target_to_aice(target);
2074 aice_run(aice);
2075 return ERROR_OK;
2076 }
2077
2078 LOG_DEBUG("Change target state to TARGET_HALTED.");
2079
2080 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
2081 }
2082 } else if (state == TARGET_RESET) {
2083 if (target->state == TARGET_HALTED) {
2084 /* similar to assert srst */
2085 register_cache_invalidate(nds32->core_cache);
2086 target->state = TARGET_RESET;
2087
2088 /* TODO: deassert srst */
2089 } else if (target->state == TARGET_RUNNING) {
2090 /* reset as running */
2091 LOG_WARNING("<-- TARGET WARNING! The debug target has been reset. -->");
2092 }
2093 } else {
2094 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
2095 LOG_DEBUG("Change target state to TARGET_RUNNING.");
2096 target->state = TARGET_RUNNING;
2097 target->debug_reason = DBG_REASON_NOTHALTED;
2098 }
2099 }
2100
2101 return ERROR_OK;
2102 }
2103
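/**
 * Resume execution, either at the current PC or at the given address.
 * Hardware single stepping (IR0 bit 11, "HSS") is cleared before resuming
 * unless the core already sits at its maximum interrupt level, debug state is
 * left via leave_debug_state(), and aice_run() is skipped when a
 * virtual-hosting ctrl-c is pending (the flag is simply cleared instead).
 */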
2104 int nds32_resume(struct target *target, int current,
2105 target_addr_t address, int handle_breakpoints, int debug_execution)
2106 {
2107 LOG_DEBUG("current %d address %08" TARGET_PRIxADDR
2108 " handle_breakpoints %d"
2109 " debug_execution %d",
2110 current, address, handle_breakpoints, debug_execution);
2111
2112 struct nds32 *nds32 = target_to_nds32(target);
2113
2114 if (target->state != TARGET_HALTED) {
2115 LOG_ERROR("Target not halted");
2116 return ERROR_TARGET_NOT_HALTED;
2117 }
2118
2119 address = nds32_nextpc(nds32, current, address);
2120
2121 LOG_DEBUG("RESUME PC %08" TARGET_PRIxADDR "%s", address, !current ? "!" : "");
2122
2123 if (!debug_execution)
2124 target_free_all_working_areas(target);
2125
2126 /* Disable HSS to keep users from misusing it */
2127 if (nds32_reach_max_interrupt_level(nds32) == false) {
2128 uint32_t value_ir0;
2129 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
2130 value_ir0 &= ~(0x1 << 11);
2131 nds32_set_mapped_reg(nds32, IR0, value_ir0);
2132 }
2133
2134 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
2135 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
2136
2137 if (nds32->virtual_hosting_ctrl_c == false) {
2138 struct aice_port_s *aice = target_to_aice(target);
2139 aice_run(aice);
2140 } else
2141 nds32->virtual_hosting_ctrl_c = false;
2142
2143 target->debug_reason = DBG_REASON_NOTHALTED;
2144 if (!debug_execution)
2145 target->state = TARGET_RUNNING;
2146 else
2147 target->state = TARGET_DEBUG_RUNNING;
2148
2149 LOG_DEBUG("target->state: %s",
2150 target_state_name(target));
2151
2152 return ERROR_OK;
2153 }
2154
2155 static int nds32_soft_reset_halt(struct target *target)
2156 {
2157 /* TODO: test it */
2158 struct nds32 *nds32 = target_to_nds32(target);
2159 struct aice_port_s *aice = target_to_aice(target);
2160
2161 aice_assert_srst(aice, AICE_SRST);
2162
2163 /* halt core and set pc to 0x0 */
2164 int retval = target_halt(target);
2165 if (retval != ERROR_OK)
2166 return retval;
2167
2168 /* start fetching from IVB */
2169 uint32_t value_ir3;
2170 nds32_get_mapped_reg(nds32, IR3, &value_ir3);
2171 nds32_set_mapped_reg(nds32, PC, value_ir3 & 0xFFFF0000);
2172
2173 return ERROR_OK;
2174 }
2175
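/**
 * Assert reset. For reset-and-halt the adapter's reset-and-hold mode is used
 * when the EDM supports it; older EDM versions (and one explicitly listed core
 * revision) fall back to nds32_soft_reset_halt(), which halts the core and
 * points PC back at the interrupt vector base. A plain reset just asserts
 * SRST and waits for nds32->boot_time. The EDM passcode is re-sent afterwards
 * and the register cache is invalidated, since its contents are stale after
 * reset.
 */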
2176 int nds32_assert_reset(struct target *target)
2177 {
2178 struct nds32 *nds32 = target_to_nds32(target);
2179 struct aice_port_s *aice = target_to_aice(target);
2180 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
2181
2182 /* TODO: apply hw reset signal in not examined state */
2183 if (!(target_was_examined(target))) {
2184 LOG_WARNING("Reset is not asserted because the target is not examined.");
2185 LOG_WARNING("Use a reset button or power cycle the target.");
2186 return ERROR_TARGET_NOT_EXAMINED;
2187 }
2188
2189 if (target->reset_halt) {
2190 if ((nds32->soft_reset_halt)
2191 || (nds32->edm.version < 0x51)
2192 || ((nds32->edm.version == 0x51)
2193 && (cpu_version->revision == 0x1C)
2194 && (cpu_version->cpu_id_family == 0xC)
2195 && (cpu_version->cpu_id_version == 0x0)))
2196 nds32_soft_reset_halt(target);
2197 else
2198 aice_assert_srst(aice, AICE_RESET_HOLD);
2199 } else {
2200 aice_assert_srst(aice, AICE_SRST);
2201 alive_sleep(nds32->boot_time);
2202 }
2203
2204 /* set passcode for secure MCU after core reset */
2205 nds32_login(nds32);
2206
2207 /* registers are now invalid */
2208 register_cache_invalidate(nds32->core_cache);
2209
2210 target->state = TARGET_RESET;
2211
2212 return ERROR_OK;
2213 }
2214
2215 static int nds32_gdb_attach(struct nds32 *nds32)
2216 {
2217 LOG_DEBUG("nds32_gdb_attach, target coreid: %" PRId32, nds32->target->coreid);
2218
2219 if (nds32->attached == false) {
2220
2221 if (nds32->keep_target_edm_ctl) {
2222 /* backup target EDM_CTL */
2223 struct aice_port_s *aice = target_to_aice(nds32->target);
2224 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &nds32->backup_edm_ctl);
2225 }
2226
2227 target_halt(nds32->target);
2228
2229 nds32->attached = true;
2230 }
2231
2232 return ERROR_OK;
2233 }
2234
2235 static int nds32_gdb_detach(struct nds32 *nds32)
2236 {
2237 LOG_DEBUG("nds32_gdb_detach");
2238 bool backup_virtual_hosting_setting;
2239
2240 if (nds32->attached) {
2241
2242 backup_virtual_hosting_setting = nds32->virtual_hosting;
2243 /* turn off virtual hosting before resume as gdb-detach */
2244 nds32->virtual_hosting = false;
2245 target_resume(nds32->target, 1, 0, 0, 0);
2246 nds32->virtual_hosting = backup_virtual_hosting_setting;
2247
2248 if (nds32->keep_target_edm_ctl) {
2249 /* restore target EDM_CTL */
2250 struct aice_port_s *aice = target_to_aice(nds32->target);
2251 aice_write_debug_reg(aice, NDS_EDM_SR_EDM_CTL, nds32->backup_edm_ctl);
2252 }
2253
2254 nds32->attached = false;
2255 }
2256
2257 return ERROR_OK;
2258 }
2259
2260 static int nds32_callback_event_handler(struct target *target,
2261 enum target_event event, void *priv)
2262 {
2263 int retval = ERROR_OK;
2264 int target_number = *(int *)priv;
2265
2266 if (target_number != target->target_number)
2267 return ERROR_OK;
2268
2269 struct nds32 *nds32 = target_to_nds32(target);
2270
2271 switch (event) {
2272 case TARGET_EVENT_GDB_ATTACH:
2273 retval = nds32_gdb_attach(nds32);
2274 break;
2275 case TARGET_EVENT_GDB_DETACH:
2276 retval = nds32_gdb_detach(nds32);
2277 break;
2278 default:
2279 break;
2280 }
2281
2282 return retval;
2283 }
2284
2285 int nds32_init(struct nds32 *nds32)
2286 {
2287 /* Initialize anything we can set up without talking to the target */
2288 nds32->memory.access_channel = NDS_MEMORY_ACC_CPU;
2289
2290 /* register event callback */
2291 target_register_event_callback(nds32_callback_event_handler,
2292 &(nds32->target->target_number));
2293
2294 return ERROR_OK;
2295 }
2296
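/**
 * Fill a GDB file-I/O request from the syscall the target just hit: the
 * syscall number is taken from bits [30:16] of IR6 and the parameters from
 * R0-R2. For calls that pass a path or a command string, up to 256 bytes are
 * read from target memory only to compute the string length that GDB expects
 * alongside the pointer.
 */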
2297 int nds32_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
2298 {
2299 /* fill syscall parameters into the file-I/O info structure */
2300 if (!fileio_info) {
2301 LOG_ERROR("Target has no initialized file-I/O data structure");
2302 return ERROR_FAIL;
2303 }
2304
2305 struct nds32 *nds32 = target_to_nds32(target);
2306 uint32_t value_ir6;
2307 uint32_t syscall_id;
2308
2309 if (nds32->hit_syscall == false)
2310 return ERROR_FAIL;
2311
2312 nds32_get_mapped_reg(nds32, IR6, &value_ir6);
2313 syscall_id = (value_ir6 >> 16) & 0x7FFF;
2314 nds32->active_syscall_id = syscall_id;
2315
2316 LOG_DEBUG("hit syscall ID: 0x%" PRIx32, syscall_id);
2317
2318 /* free previous identifier storage */
2319 free(fileio_info->identifier);
2320 fileio_info->identifier = NULL;
2321
2322 uint32_t reg_r0, reg_r1, reg_r2;
2323 nds32_get_mapped_reg(nds32, R0, &reg_r0);
2324 nds32_get_mapped_reg(nds32, R1, &reg_r1);
2325 nds32_get_mapped_reg(nds32, R2, &reg_r2);
2326
2327 switch (syscall_id) {
2328 case NDS32_SYSCALL_EXIT:
2329 fileio_info->identifier = malloc(5);
2330 sprintf(fileio_info->identifier, "exit");
2331 fileio_info->param_1 = reg_r0;
2332 break;
2333 case NDS32_SYSCALL_OPEN:
2334 {
2335 uint8_t filename[256];
2336 fileio_info->identifier = malloc(5);
2337 sprintf(fileio_info->identifier, "open");
2338 fileio_info->param_1 = reg_r0;
2339 /* reserve fileio_info->param_2 for length of path */
2340 fileio_info->param_3 = reg_r1;
2341 fileio_info->param_4 = reg_r2;
2342
2343 target->type->read_buffer(target, reg_r0, 256, filename);
2344 fileio_info->param_2 = strlen((char *)filename);
2345 }
2346 break;
2347 case NDS32_SYSCALL_CLOSE:
2348 fileio_info->identifier = malloc(6);
2349 sprintf(fileio_info->identifier, "close");
2350 fileio_info->param_1 = reg_r0;
2351 break;
2352 case NDS32_SYSCALL_READ:
2353 fileio_info->identifier = malloc(5);
2354 sprintf(fileio_info->identifier, "read");
2355 fileio_info->param_1 = reg_r0;
2356 fileio_info->param_2 = reg_r1;
2357 fileio_info->param_3 = reg_r2;
2358 break;
2359 case NDS32_SYSCALL_WRITE:
2360 fileio_info->identifier = malloc(6);
2361 sprintf(fileio_info->identifier, "write");
2362 fileio_info->param_1 = reg_r0;
2363 fileio_info->param_2 = reg_r1;
2364 fileio_info->param_3 = reg_r2;
2365 break;
2366 case NDS32_SYSCALL_LSEEK:
2367 fileio_info->identifier = malloc(6);
2368 sprintf(fileio_info->identifier, "lseek");
2369 fileio_info->param_1 = reg_r0;
2370 fileio_info->param_2 = reg_r1;
2371 fileio_info->param_3 = reg_r2;
2372 break;
2373 case NDS32_SYSCALL_UNLINK:
2374 {
2375 uint8_t filename[256];
2376 fileio_info->identifier = malloc(7);
2377 sprintf(fileio_info->identifier, "unlink");
2378 fileio_info->param_1 = reg_r0;
2379 /* reserve fileio_info->param_2 for length of path */
2380
2381 target->type->read_buffer(target, reg_r0, 256, filename);
2382 fileio_info->param_2 = strlen((char *)filename);
2383 }
2384 break;
2385 case NDS32_SYSCALL_RENAME:
2386 {
2387 uint8_t filename[256];
2388 fileio_info->identifier = malloc(7);
2389 sprintf(fileio_info->identifier, "rename");
2390 fileio_info->param_1 = reg_r0;
2391 /* reserve fileio_info->param_2 for length of old path */
2392 fileio_info->param_3 = reg_r1;
2393 /* reserve fileio_info->param_4 for length of new path */
2394
2395 target->type->read_buffer(target, reg_r0, 256, filename);
2396 fileio_info->param_2 = strlen((char *)filename);
2397
2398 target->type->read_buffer(target, reg_r1, 256, filename);
2399 fileio_info->param_4 = strlen((char *)filename);
2400 }
2401 break;
2402 case NDS32_SYSCALL_FSTAT:
2403 fileio_info->identifier = malloc(6);
2404 sprintf(fileio_info->identifier, "fstat");
2405 fileio_info->param_1 = reg_r0;
2406 fileio_info->param_2 = reg_r1;
2407 break;
2408 case NDS32_SYSCALL_STAT:
2409 {
2410 uint8_t filename[256];
2411 fileio_info->identifier = malloc(5);
2412 sprintf(fileio_info->identifier, "stat");
2413 fileio_info->param_1 = reg_r0;
2414 /* reserve fileio_info->param_2 for length of old path */
2415 fileio_info->param_3 = reg_r1;
2416
2417 target->type->read_buffer(target, reg_r0, 256, filename);
2418 fileio_info->param_2 = strlen((char *)filename) + 1;
2419 }
2420 break;
2421 case NDS32_SYSCALL_GETTIMEOFDAY:
2422 fileio_info->identifier = malloc(13);
2423 sprintf(fileio_info->identifier, "gettimeofday");
2424 fileio_info->param_1 = reg_r0;
2425 fileio_info->param_2 = reg_r1;
2426 break;
2427 case NDS32_SYSCALL_ISATTY:
2428 fileio_info->identifier = malloc(7);
2429 sprintf(fileio_info->identifier, "isatty");
2430 fileio_info->param_1 = reg_r0;
2431 break;
2432 case NDS32_SYSCALL_SYSTEM:
2433 {
2434 uint8_t command[256];
2435 fileio_info->identifier = malloc(7);
2436 sprintf(fileio_info->identifier, "system");
2437 fileio_info->param_1 = reg_r0;
2438 /* reserve fileio_info->param_2 for length of old path */
2439
2440 target->type->read_buffer(target, reg_r0, 256, command);
2441 fileio_info->param_2 = strlen((char *)command);
2442 }
2443 break;
2444 case NDS32_SYSCALL_ERRNO:
2445 fileio_info->identifier = malloc(6);
2446 sprintf(fileio_info->identifier, "errno");
2447 nds32_set_mapped_reg(nds32, R0, nds32->virtual_hosting_errno);
2448 break;
2449 default:
2450 fileio_info->identifier = malloc(8);
2451 sprintf(fileio_info->identifier, "unknown");
2452 break;
2453 }
2454
2455 return ERROR_OK;
2456 }
2457
2458 int nds32_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
2459 {
2460 LOG_DEBUG("syscall return code: 0x%x, errno: 0x%x , ctrl_c: %s",
2461 retcode, fileio_errno, ctrl_c ? "true" : "false");
2462
2463 struct nds32 *nds32 = target_to_nds32(target);
2464
2465 nds32_set_mapped_reg(nds32, R0, (uint32_t)retcode);
2466
2467 nds32->virtual_hosting_errno = fileio_errno;
2468 nds32->virtual_hosting_ctrl_c = ctrl_c;
2469 nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;
2470
2471 return ERROR_OK;
2472 }
2473
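/**
 * Collect a PC-based profile while the target is halted: the AICE adapter is
 * asked to sample the PC register every 10 ms, for at most seconds * 100
 * samples (capped at max_num_samples). The register cache is invalidated
 * afterwards, presumably because the adapter runs the core while sampling.
 */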
2474 int nds32_profiling(struct target *target, uint32_t *samples,
2475 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2476 {
2477 /* sample $PC every 10 milliseconds */
2478 uint32_t iteration = seconds * 100;
2479 struct aice_port_s *aice = target_to_aice(target);
2480 struct nds32 *nds32 = target_to_nds32(target);
2481
2482 /* REVISIT: can nds32 profile without halting? */
2483 if (target->state != TARGET_HALTED) {
2484 LOG_WARNING("target %s is not halted (profiling)", target->cmd_name);
2485 return ERROR_TARGET_NOT_HALTED;
2486 }
2487
2488 if (max_num_samples < iteration)
2489 iteration = max_num_samples;
2490
2491 int pc_regnum = nds32->register_map(nds32, PC);
2492 aice_profiling(aice, 10, iteration, pc_regnum, samples, num_samples);
2493
2494 register_cache_invalidate(nds32->core_cache);
2495
2496 return ERROR_OK;
2497 }
2498
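/**
 * Write data coming back from a GDB file-I/O request into target memory.
 * Plain buffers are written as-is; replies to fstat/stat and gettimeofday are
 * first repacked byte-by-byte from GDB's big-endian wire layout into the
 * target's own struct stat / struct timeval layout (including the st_spare
 * padding fields), then written with the converted size.
 */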
2499 int nds32_gdb_fileio_write_memory(struct nds32 *nds32, uint32_t address,
2500 uint32_t size, const uint8_t *buffer)
2501 {
2502 if ((nds32->active_syscall_id == NDS32_SYSCALL_FSTAT) ||
2503 (nds32->active_syscall_id == NDS32_SYSCALL_STAT)) {
2504 /* If doing GDB file-I/O, target should convert 'struct stat'
2505 * from gdb-format to target-format */
2506 uint8_t stat_buffer[NDS32_STRUCT_STAT_SIZE];
2507 /* st_dev 2 */
2508 stat_buffer[0] = buffer[3];
2509 stat_buffer[1] = buffer[2];
2510 /* st_ino 2 */
2511 stat_buffer[2] = buffer[7];
2512 stat_buffer[3] = buffer[6];
2513 /* st_mode 4 */
2514 stat_buffer[4] = buffer[11];
2515 stat_buffer[5] = buffer[10];
2516 stat_buffer[6] = buffer[9];
2517 stat_buffer[7] = buffer[8];
2518 /* st_nlink 2 */
2519 stat_buffer[8] = buffer[15];
2520 stat_buffer[9] = buffer[14];
2521 /* st_uid 2 */
2522 stat_buffer[10] = buffer[19];
2523 stat_buffer[11] = buffer[18];
2524 /* st_gid 2 */
2525 stat_buffer[12] = buffer[23];
2526 stat_buffer[13] = buffer[22];
2527 /* st_rdev 2 */
2528 stat_buffer[14] = buffer[27];
2529 stat_buffer[15] = buffer[26];
2530 /* st_size 4 */
2531 stat_buffer[16] = buffer[35];
2532 stat_buffer[17] = buffer[34];
2533 stat_buffer[18] = buffer[33];
2534 stat_buffer[19] = buffer[32];
2535 /* st_atime 4 */
2536 stat_buffer[20] = buffer[55];
2537 stat_buffer[21] = buffer[54];
2538 stat_buffer[22] = buffer[53];
2539 stat_buffer[23] = buffer[52];
2540 /* st_spare1 4 */
2541 stat_buffer[24] = 0;
2542 stat_buffer[25] = 0;
2543 stat_buffer[26] = 0;
2544 stat_buffer[27] = 0;
2545 /* st_mtime 4 */
2546 stat_buffer[28] = buffer[59];
2547 stat_buffer[29] = buffer[58];
2548 stat_buffer[30] = buffer[57];
2549 stat_buffer[31] = buffer[56];
2550 /* st_spare2 4 */
2551 stat_buffer[32] = 0;
2552 stat_buffer[33] = 0;
2553 stat_buffer[34] = 0;
2554 stat_buffer[35] = 0;
2555 /* st_ctime 4 */
2556 stat_buffer[36] = buffer[63];
2557 stat_buffer[37] = buffer[62];
2558 stat_buffer[38] = buffer[61];
2559 stat_buffer[39] = buffer[60];
2560 /* st_spare3 4 */
2561 stat_buffer[40] = 0;
2562 stat_buffer[41] = 0;
2563 stat_buffer[42] = 0;
2564 stat_buffer[43] = 0;
2565 /* st_blksize 4 */
2566 stat_buffer[44] = buffer[43];
2567 stat_buffer[45] = buffer[42];
2568 stat_buffer[46] = buffer[41];
2569 stat_buffer[47] = buffer[40];
2570 /* st_blocks 4 */
2571 stat_buffer[48] = buffer[51];
2572 stat_buffer[49] = buffer[50];
2573 stat_buffer[50] = buffer[49];
2574 stat_buffer[51] = buffer[48];
2575 /* st_spare4 8 */
2576 stat_buffer[52] = 0;
2577 stat_buffer[53] = 0;
2578 stat_buffer[54] = 0;
2579 stat_buffer[55] = 0;
2580 stat_buffer[56] = 0;
2581 stat_buffer[57] = 0;
2582 stat_buffer[58] = 0;
2583 stat_buffer[59] = 0;
2584
2585 return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_STAT_SIZE, stat_buffer);
2586 } else if (nds32->active_syscall_id == NDS32_SYSCALL_GETTIMEOFDAY) {
2587 /* If doing GDB file-I/O, target should convert 'struct timeval'
2588 * from gdb-format to target-format */
2589 uint8_t timeval_buffer[NDS32_STRUCT_TIMEVAL_SIZE];
2590 timeval_buffer[0] = buffer[3];
2591 timeval_buffer[1] = buffer[2];
2592 timeval_buffer[2] = buffer[1];
2593 timeval_buffer[3] = buffer[0];
2594 timeval_buffer[4] = buffer[11];
2595 timeval_buffer[5] = buffer[10];
2596 timeval_buffer[6] = buffer[9];
2597 timeval_buffer[7] = buffer[8];
2598
2599 return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_TIMEVAL_SIZE, timeval_buffer);
2600 }
2601
2602 return nds32_write_buffer(nds32->target, address, size, buffer);
2603 }
2604
2605 int nds32_reset_halt(struct nds32 *nds32)
2606 {
2607 LOG_INFO("reset halt as init");
2608
2609 struct aice_port_s *aice = target_to_aice(nds32->target);
2610 aice_assert_srst(aice, AICE_RESET_HOLD);
2611
2612 return ERROR_OK;
2613 }
