target: Rename 'linked_BRP' to 'linked_brp'
[openocd.git] / src / target / nds32.c
1 /***************************************************************************
2 * Copyright (C) 2013 Andes Technology *
3 * Hsiangkai Wang <hkwang@andestech.com> *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
17 ***************************************************************************/
18
19 #ifdef HAVE_CONFIG_H
20 #include "config.h"
21 #endif
22
23 #include <helper/log.h>
24 #include <helper/binarybuffer.h>
25 #include "nds32.h"
26 #include "nds32_aice.h"
27 #include "nds32_tlb.h"
28 #include "nds32_disassembler.h"
29
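/* BREAK instruction encodings. The values are byte-swapped relative to the
 * big-endian instruction stream (shown in the trailing comments) so that the
 * raw bytes of the host integer can be written straight into target memory;
 * this assumes a little-endian host. */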
30 const int NDS32_BREAK_16 = 0x00EA; /* 0xEA00 */
31 const int NDS32_BREAK_32 = 0x0A000064; /* 0x6400000A */
32
33 struct nds32_edm_operation nds32_edm_ops[NDS32_EDM_OPERATION_MAX_NUM];
34 uint32_t nds32_edm_ops_num;
35
36 const char *nds32_debug_type_name[11] = {
37 "SOFTWARE BREAK",
38 "SOFTWARE BREAK_16",
39 "HARDWARE BREAKPOINT",
40 "DATA ADDR WATCHPOINT PRECISE",
41 "DATA VALUE WATCHPOINT PRECISE",
42 "DATA VALUE WATCHPOINT IMPRECISE",
43 "DEBUG INTERRUPT",
44 "HARDWARE SINGLE STEP",
45 "DATA ADDR WATCHPOINT NEXT PRECISE",
46 "DATA VALUE WATCHPOINT NEXT PRECISE",
47 "LOAD STORE GLOBAL STOP",
48 };
49
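/* ILM/DLM size in bytes, indexed by the local-memory size field of $mr6/$mr7
 * (see nds32_config()). */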
50 static const int NDS32_LM_SIZE_TABLE[16] = {
51 4 * 1024,
52 8 * 1024,
53 16 * 1024,
54 32 * 1024,
55 64 * 1024,
56 128 * 1024,
57 256 * 1024,
58 512 * 1024,
59 1024 * 1024,
60 1 * 1024,
61 2 * 1024,
62 };
63
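/* Cache line size in bytes, indexed by the line-size field of $cr1/$cr2
 * (see nds32_init_memory_config() and nds32_cache_sync()). */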
64 static const int NDS32_LINE_SIZE_TABLE[6] = {
65 0,
66 8,
67 16,
68 32,
69 64,
70 128,
71 };
72
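/* reg->get handler for 32-bit registers: return the cached value when it is
 * valid, otherwise read the register from the target through the AICE
 * adapter. Registers of a disabled FPU/audio unit are faked as 0 so that no
 * exception is raised on the target. */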
73 static int nds32_get_core_reg(struct reg *reg)
74 {
75 int retval;
76 struct nds32_reg *reg_arch_info = reg->arch_info;
77 struct target *target = reg_arch_info->target;
78 struct nds32 *nds32 = target_to_nds32(target);
79 struct aice_port_s *aice = target_to_aice(target);
80
81 if (target->state != TARGET_HALTED) {
82 LOG_ERROR("Target not halted");
83 return ERROR_TARGET_NOT_HALTED;
84 }
85
86 if (reg->valid) {
87 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
88 		LOG_DEBUG("reading register(cached) %" PRIu32 "(%s), value: 0x%8.8" PRIx32,
89 reg_arch_info->num, reg->name, val);
90 return ERROR_OK;
91 }
92
93 int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);
94
95 if (reg_arch_info->enable == false) {
96 buf_set_u32(reg_arch_info->value, 0, 32, NDS32_REGISTER_DISABLE);
97 retval = ERROR_FAIL;
98 } else {
99 uint32_t val = 0;
100 if ((nds32->fpu_enable == false)
101 && (NDS32_REG_TYPE_FPU == nds32_reg_type(mapped_regnum))) {
102 retval = ERROR_OK;
103 } else if ((nds32->audio_enable == false)
104 && (NDS32_REG_TYPE_AUMR == nds32_reg_type(mapped_regnum))) {
105 retval = ERROR_OK;
106 } else {
107 retval = aice_read_register(aice, mapped_regnum, &val);
108 }
109 buf_set_u32(reg_arch_info->value, 0, 32, val);
110
111 		LOG_DEBUG("reading register %" PRIu32 "(%s), value: 0x%8.8" PRIx32,
112 reg_arch_info->num, reg->name, val);
113 }
114
115 if (retval == ERROR_OK) {
116 reg->valid = true;
117 reg->dirty = false;
118 }
119
120 return retval;
121 }
122
123 static int nds32_get_core_reg_64(struct reg *reg)
124 {
125 int retval;
126 struct nds32_reg *reg_arch_info = reg->arch_info;
127 struct target *target = reg_arch_info->target;
128 struct nds32 *nds32 = target_to_nds32(target);
129 struct aice_port_s *aice = target_to_aice(target);
130
131 if (target->state != TARGET_HALTED) {
132 LOG_ERROR("Target not halted");
133 return ERROR_TARGET_NOT_HALTED;
134 }
135
136 if (reg->valid)
137 return ERROR_OK;
138
139 if (reg_arch_info->enable == false) {
140 buf_set_u64(reg_arch_info->value, 0, 64, NDS32_REGISTER_DISABLE);
141 retval = ERROR_FAIL;
142 } else {
143 uint64_t val = 0;
144 if ((nds32->fpu_enable == false)
145 && ((FD0 <= reg_arch_info->num) && (reg_arch_info->num <= FD31))) {
146 retval = ERROR_OK;
147 } else {
148 retval = aice_read_reg_64(aice, reg_arch_info->num, &val);
149 }
150 buf_set_u64(reg_arch_info->value, 0, 64, val);
151 }
152
153 if (retval == ERROR_OK) {
154 reg->valid = true;
155 reg->dirty = false;
156 }
157
158 return retval;
159 }
160
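/* Refresh state derived from $ir0 (PSW): the data endianness used for memory
 * accesses and whether address translation is active. */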
161 static int nds32_update_psw(struct nds32 *nds32)
162 {
163 uint32_t value_ir0;
164 struct aice_port_s *aice = target_to_aice(nds32->target);
165
166 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
167
168 	/* Save data memory endianness */
169 if ((value_ir0 >> 5) & 0x1) {
170 nds32->data_endian = TARGET_BIG_ENDIAN;
171 aice_set_data_endian(aice, AICE_BIG_ENDIAN);
172 } else {
173 nds32->data_endian = TARGET_LITTLE_ENDIAN;
174 aice_set_data_endian(aice, AICE_LITTLE_ENDIAN);
175 }
176
177 /* Save translation status */
178 nds32->memory.address_translation = ((value_ir0 >> 7) & 0x1) ? true : false;
179
180 return ERROR_OK;
181 }
182
183 static int nds32_update_mmu_info(struct nds32 *nds32)
184 {
185 uint32_t value;
186
187 /* Update MMU control status */
188 nds32_get_mapped_reg(nds32, MR0, &value);
189 nds32->mmu_config.default_min_page_size = value & 0x1;
190 nds32->mmu_config.multiple_page_size_in_use = (value >> 10) & 0x1;
191
192 return ERROR_OK;
193 }
194
195 static int nds32_update_cache_info(struct nds32 *nds32)
196 {
197 uint32_t value;
198
199 if (ERROR_OK == nds32_get_mapped_reg(nds32, MR8, &value)) {
200 if (value & 0x1)
201 nds32->memory.icache.enable = true;
202 else
203 nds32->memory.icache.enable = false;
204
205 if (value & 0x2)
206 nds32->memory.dcache.enable = true;
207 else
208 nds32->memory.dcache.enable = false;
209 } else {
210 nds32->memory.icache.enable = false;
211 nds32->memory.dcache.enable = false;
212 }
213
214 return ERROR_OK;
215 }
216
217 static int nds32_update_lm_info(struct nds32 *nds32)
218 {
219 struct nds32_memory *memory = &(nds32->memory);
220 uint32_t value_mr6;
221 uint32_t value_mr7;
222
223 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
224 if (value_mr6 & 0x1)
225 memory->ilm_enable = true;
226 else
227 memory->ilm_enable = false;
228
229 if (memory->ilm_align_ver == 0) { /* 1MB aligned */
230 memory->ilm_start = value_mr6 & 0xFFF00000;
231 memory->ilm_end = memory->ilm_start + memory->ilm_size;
232 } else if (memory->ilm_align_ver == 1) { /* aligned to local memory size */
233 memory->ilm_start = value_mr6 & 0xFFFFFC00;
234 memory->ilm_end = memory->ilm_start + memory->ilm_size;
235 } else {
236 memory->ilm_start = -1;
237 memory->ilm_end = -1;
238 }
239
240 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
241 if (value_mr7 & 0x1)
242 memory->dlm_enable = true;
243 else
244 memory->dlm_enable = false;
245
246 if (memory->dlm_align_ver == 0) { /* 1MB aligned */
247 memory->dlm_start = value_mr7 & 0xFFF00000;
248 memory->dlm_end = memory->dlm_start + memory->dlm_size;
249 } else if (memory->dlm_align_ver == 1) { /* aligned to local memory size */
250 memory->dlm_start = value_mr7 & 0xFFFFFC00;
251 memory->dlm_end = memory->dlm_start + memory->dlm_size;
252 } else {
253 memory->dlm_start = -1;
254 memory->dlm_end = -1;
255 }
256
257 return ERROR_OK;
258 }
259
260 /**
261  * If the FPU/audio extension is disabled, accessing its registers raises an
262  * exception on the target. So we check whether the FPU/audio extension is
263  * enabled whenever the target halts. If it is disabled and a user accesses
264  * one of its registers, OpenOCD returns a fake value of 0 instead of
265  * accessing the register through DIM.
266  */
267 static int nds32_check_extension(struct nds32 *nds32)
268 {
269 uint32_t value;
270
271 nds32_get_mapped_reg(nds32, FUCPR, &value);
272 if (value == NDS32_REGISTER_DISABLE) {
273 nds32->fpu_enable = false;
274 nds32->audio_enable = false;
275 return ERROR_OK;
276 }
277
278 if (value & 0x1)
279 nds32->fpu_enable = true;
280 else
281 nds32->fpu_enable = false;
282
283 if (value & 0x80000000)
284 nds32->audio_enable = true;
285 else
286 nds32->audio_enable = false;
287
288 return ERROR_OK;
289 }
290
291 static int nds32_set_core_reg(struct reg *reg, uint8_t *buf)
292 {
293 struct nds32_reg *reg_arch_info = reg->arch_info;
294 struct target *target = reg_arch_info->target;
295 struct nds32 *nds32 = target_to_nds32(target);
296 struct aice_port_s *aice = target_to_aice(target);
297 uint32_t value = buf_get_u32(buf, 0, 32);
298
299 if (target->state != TARGET_HALTED) {
300 LOG_ERROR("Target not halted");
301 return ERROR_TARGET_NOT_HALTED;
302 }
303
304 int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);
305
306 /* ignore values that will generate exception */
307 if (nds32_reg_exception(mapped_regnum, value))
308 return ERROR_OK;
309
310 	LOG_DEBUG("writing register %" PRIu32 "(%s) with value 0x%8.8" PRIx32,
311 reg_arch_info->num, reg->name, value);
312
313 if ((nds32->fpu_enable == false) &&
314 (NDS32_REG_TYPE_FPU == nds32_reg_type(mapped_regnum))) {
315
316 buf_set_u32(reg->value, 0, 32, 0);
317 } else if ((nds32->audio_enable == false) &&
318 (NDS32_REG_TYPE_AUMR == nds32_reg_type(mapped_regnum))) {
319
320 buf_set_u32(reg->value, 0, 32, 0);
321 } else {
322 buf_set_u32(reg->value, 0, 32, value);
323 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
324 aice_write_register(aice, mapped_regnum, val);
325
326 		/* After writing a register, read the value back from the target
327 		 * to avoid W1C (write-1-to-clear) inconsistency. */
328 aice_read_register(aice, mapped_regnum, &val);
329 buf_set_u32(reg_arch_info->value, 0, 32, val);
330 }
331
332 reg->valid = true;
333 reg->dirty = false;
334
335 /* update registers to take effect right now */
336 if (IR0 == mapped_regnum) {
337 nds32_update_psw(nds32);
338 } else if (MR0 == mapped_regnum) {
339 nds32_update_mmu_info(nds32);
340 } else if ((MR6 == mapped_regnum) || (MR7 == mapped_regnum)) {
341 /* update lm information */
342 nds32_update_lm_info(nds32);
343 } else if (MR8 == mapped_regnum) {
344 nds32_update_cache_info(nds32);
345 } else if (FUCPR == mapped_regnum) {
346 /* update audio/fpu setting */
347 nds32_check_extension(nds32);
348 }
349
350 return ERROR_OK;
351 }
352
353 static int nds32_set_core_reg_64(struct reg *reg, uint8_t *buf)
354 {
355 struct nds32_reg *reg_arch_info = reg->arch_info;
356 struct target *target = reg_arch_info->target;
357 struct nds32 *nds32 = target_to_nds32(target);
358 uint32_t low_part = buf_get_u32(buf, 0, 32);
359 uint32_t high_part = buf_get_u32(buf, 32, 32);
360
361 if (target->state != TARGET_HALTED) {
362 LOG_ERROR("Target not halted");
363 return ERROR_TARGET_NOT_HALTED;
364 }
365
366 if ((nds32->fpu_enable == false) &&
367 ((FD0 <= reg_arch_info->num) && (reg_arch_info->num <= FD31))) {
368
369 buf_set_u32(reg->value, 0, 32, 0);
370 buf_set_u32(reg->value, 32, 32, 0);
371
372 reg->valid = true;
373 reg->dirty = false;
374 } else {
375 buf_set_u32(reg->value, 0, 32, low_part);
376 buf_set_u32(reg->value, 32, 32, high_part);
377
378 reg->valid = true;
379 reg->dirty = true;
380 }
381
382 return ERROR_OK;
383 }
384
385 static const struct reg_arch_type nds32_reg_access_type = {
386 .get = nds32_get_core_reg,
387 .set = nds32_set_core_reg,
388 };
389
390 static const struct reg_arch_type nds32_reg_access_type_64 = {
391 .get = nds32_get_core_reg_64,
392 .set = nds32_set_core_reg_64,
393 };
394
395 static struct reg_cache *nds32_build_reg_cache(struct target *target,
396 struct nds32 *nds32)
397 {
398 	struct reg_cache *cache = calloc(1, sizeof(struct reg_cache));
399 struct reg *reg_list = calloc(TOTAL_REG_NUM, sizeof(struct reg));
400 struct nds32_reg *reg_arch_info = calloc(TOTAL_REG_NUM, sizeof(struct nds32_reg));
401 int i;
402
403 if (!cache || !reg_list || !reg_arch_info) {
404 free(cache);
405 free(reg_list);
406 free(reg_arch_info);
407 return NULL;
408 }
409
410 cache->name = "Andes registers";
411 cache->next = NULL;
412 cache->reg_list = reg_list;
413 cache->num_regs = 0;
414
415 for (i = 0; i < TOTAL_REG_NUM; i++) {
416 reg_arch_info[i].num = i;
417 reg_arch_info[i].target = target;
418 reg_arch_info[i].nds32 = nds32;
419 reg_arch_info[i].enable = false;
420
421 reg_list[i].name = nds32_reg_simple_name(i);
422 reg_list[i].number = reg_arch_info[i].num;
423 reg_list[i].size = nds32_reg_size(i);
424 reg_list[i].arch_info = &reg_arch_info[i];
425
426 		reg_list[i].reg_data_type = calloc(1, sizeof(struct reg_data_type));
427
428 if (FD0 <= reg_arch_info[i].num && reg_arch_info[i].num <= FD31) {
429 reg_list[i].value = reg_arch_info[i].value;
430 reg_list[i].type = &nds32_reg_access_type_64;
431
432 reg_list[i].reg_data_type->type = REG_TYPE_IEEE_DOUBLE;
433 reg_list[i].reg_data_type->id = "ieee_double";
434 reg_list[i].group = "float";
435 } else {
436 reg_list[i].value = reg_arch_info[i].value;
437 reg_list[i].type = &nds32_reg_access_type;
438 reg_list[i].group = "general";
439
440 if ((FS0 <= reg_arch_info[i].num) && (reg_arch_info[i].num <= FS31)) {
441 reg_list[i].reg_data_type->type = REG_TYPE_IEEE_SINGLE;
442 reg_list[i].reg_data_type->id = "ieee_single";
443 reg_list[i].group = "float";
444 } else if ((reg_arch_info[i].num == FPCSR) ||
445 (reg_arch_info[i].num == FPCFG)) {
446 reg_list[i].group = "float";
447 } else if ((reg_arch_info[i].num == R28) ||
448 (reg_arch_info[i].num == R29) ||
449 (reg_arch_info[i].num == R31)) {
450 reg_list[i].reg_data_type->type = REG_TYPE_DATA_PTR;
451 reg_list[i].reg_data_type->id = "data_ptr";
452 } else if ((reg_arch_info[i].num == R30) ||
453 (reg_arch_info[i].num == PC)) {
454 reg_list[i].reg_data_type->type = REG_TYPE_CODE_PTR;
455 reg_list[i].reg_data_type->id = "code_ptr";
456 } else {
457 reg_list[i].reg_data_type->type = REG_TYPE_UINT32;
458 reg_list[i].reg_data_type->id = "uint32";
459 }
460 }
461
462 if (R16 <= reg_arch_info[i].num && reg_arch_info[i].num <= R25)
463 reg_list[i].caller_save = true;
464 else
465 reg_list[i].caller_save = false;
466
467 reg_list[i].feature = malloc(sizeof(struct reg_feature));
468
469 if (R0 <= reg_arch_info[i].num && reg_arch_info[i].num <= IFC_LP)
470 reg_list[i].feature->name = "org.gnu.gdb.nds32.core";
471 else if (CR0 <= reg_arch_info[i].num && reg_arch_info[i].num <= SECUR0)
472 reg_list[i].feature->name = "org.gnu.gdb.nds32.system";
473 else if (D0L24 <= reg_arch_info[i].num && reg_arch_info[i].num <= CBE3)
474 reg_list[i].feature->name = "org.gnu.gdb.nds32.audio";
475 else if (FPCSR <= reg_arch_info[i].num && reg_arch_info[i].num <= FD31)
476 reg_list[i].feature->name = "org.gnu.gdb.nds32.fpu";
477
478 cache->num_regs++;
479 }
480
481 nds32->core_cache = cache;
482
483 return cache;
484 }
485
486 static int nds32_reg_cache_init(struct target *target, struct nds32 *nds32)
487 {
488 struct reg_cache *cache;
489
490 cache = nds32_build_reg_cache(target, nds32);
491 if (!cache)
492 return ERROR_FAIL;
493
494 *register_get_last_cache_p(&target->reg_cache) = cache;
495
496 return ERROR_OK;
497 }
498
499 static struct reg *nds32_reg_current(struct nds32 *nds32, unsigned regnum)
500 {
501 struct reg *r;
502
503 r = nds32->core_cache->reg_list + regnum;
504
505 return r;
506 }
507
508 int nds32_full_context(struct nds32 *nds32)
509 {
510 uint32_t value, value_ir0;
511
512 /* save $pc & $psw */
513 nds32_get_mapped_reg(nds32, PC, &value);
514 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
515
516 nds32_update_psw(nds32);
517 nds32_update_mmu_info(nds32);
518 nds32_update_cache_info(nds32);
519 nds32_update_lm_info(nds32);
520
521 nds32_check_extension(nds32);
522
523 return ERROR_OK;
524 }
525
526 /* get register value internally */
527 int nds32_get_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t *value)
528 {
529 struct reg_cache *reg_cache = nds32->core_cache;
530 struct reg *r;
531
532 	if (regnum >= reg_cache->num_regs)
533 return ERROR_FAIL;
534
535 r = nds32_reg_current(nds32, regnum);
536
537 if (ERROR_OK != r->type->get(r))
538 return ERROR_FAIL;
539
540 *value = buf_get_u32(r->value, 0, 32);
541
542 return ERROR_OK;
543 }
544
545 /** set register internally */
546 int nds32_set_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t value)
547 {
548 struct reg_cache *reg_cache = nds32->core_cache;
549 struct reg *r;
550 uint8_t set_value[4];
551
552 	if (regnum >= reg_cache->num_regs)
553 return ERROR_FAIL;
554
555 r = nds32_reg_current(nds32, regnum);
556
557 buf_set_u32(set_value, 0, 32, value);
558
559 return r->type->set(r, set_value);
560 }
561
562 /** get general register list */
563 static int nds32_get_general_reg_list(struct nds32 *nds32,
564 struct reg **reg_list[], int *reg_list_size)
565 {
566 struct reg *reg_current;
567 int i;
568 int current_idx;
569
570 /** freed in gdb_server.c */
571 *reg_list = malloc(sizeof(struct reg *) * (IFC_LP - R0 + 1));
572 current_idx = 0;
573
574 for (i = R0; i < IFC_LP + 1; i++) {
575 reg_current = nds32_reg_current(nds32, i);
576 if (((struct nds32_reg *)reg_current->arch_info)->enable) {
577 (*reg_list)[current_idx] = reg_current;
578 current_idx++;
579 }
580 }
581 *reg_list_size = current_idx;
582
583 return ERROR_OK;
584 }
585
586 /** get all register list */
587 static int nds32_get_all_reg_list(struct nds32 *nds32,
588 struct reg **reg_list[], int *reg_list_size)
589 {
590 struct reg_cache *reg_cache = nds32->core_cache;
591 struct reg *reg_current;
592 unsigned int i;
593
594 *reg_list_size = reg_cache->num_regs;
595
596 /** freed in gdb_server.c */
597 *reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));
598
599 for (i = 0; i < reg_cache->num_regs; i++) {
600 reg_current = nds32_reg_current(nds32, i);
601 reg_current->exist = ((struct nds32_reg *)
602 reg_current->arch_info)->enable;
603 (*reg_list)[i] = reg_current;
604 }
605
606 return ERROR_OK;
607 }
608
609 /** get all register list */
610 int nds32_get_gdb_reg_list(struct target *target,
611 struct reg **reg_list[], int *reg_list_size,
612 enum target_register_class reg_class)
613 {
614 struct nds32 *nds32 = target_to_nds32(target);
615
616 switch (reg_class) {
617 case REG_CLASS_ALL:
618 return nds32_get_all_reg_list(nds32, reg_list, reg_list_size);
619 case REG_CLASS_GENERAL:
620 return nds32_get_general_reg_list(nds32, reg_list, reg_list_size);
621 default:
622 return ERROR_FAIL;
623 }
624
625 return ERROR_FAIL;
626 }
627
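/* Select the access mode (MEM/ILM/DLM) for the range [address, address + length).
 * If the range crosses an ILM/DLM boundary, *end_address is pulled back to that
 * boundary so the caller can split the access into chunks with a uniform mode. */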
628 static int nds32_select_memory_mode(struct target *target, uint32_t address,
629 uint32_t length, uint32_t *end_address)
630 {
631 struct nds32 *nds32 = target_to_nds32(target);
632 struct aice_port_s *aice = target_to_aice(target);
633 struct nds32_memory *memory = &(nds32->memory);
634 struct nds32_edm *edm = &(nds32->edm);
635 uint32_t dlm_start, dlm_end;
636 uint32_t ilm_start, ilm_end;
637 uint32_t address_end = address + length;
638
639 /* init end_address */
640 *end_address = address_end;
641
642 if (NDS_MEMORY_ACC_CPU == memory->access_channel)
643 return ERROR_OK;
644
645 if (edm->access_control == false) {
646 LOG_DEBUG("EDM does not support ACC_CTL");
647 return ERROR_OK;
648 }
649
650 if (edm->direct_access_local_memory == false) {
651 LOG_DEBUG("EDM does not support DALM");
652 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
653 return ERROR_OK;
654 }
655
656 if (NDS_MEMORY_SELECT_AUTO != memory->mode) {
657 LOG_DEBUG("Memory mode is not AUTO");
658 return ERROR_OK;
659 }
660
661 /* set default mode */
662 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
663
664 if ((memory->ilm_base != 0) && (memory->ilm_enable == true)) {
665 ilm_start = memory->ilm_start;
666 ilm_end = memory->ilm_end;
667
668 /* case 1, address < ilm_start */
669 if (address < ilm_start) {
670 if (ilm_start < address_end) {
671 /* update end_address to split non-ILM from ILM */
672 *end_address = ilm_start;
673 }
674 /* MEM mode */
675 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
676 } else if ((ilm_start <= address) && (address < ilm_end)) {
677 /* case 2, ilm_start <= address < ilm_end */
678 if (ilm_end < address_end) {
679 /* update end_address to split non-ILM from ILM */
680 *end_address = ilm_end;
681 }
682 /* ILM mode */
683 aice_memory_mode(aice, NDS_MEMORY_SELECT_ILM);
684 } else { /* case 3, ilm_end <= address */
685 /* MEM mode */
686 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
687 }
688
689 return ERROR_OK;
690 } else {
691 LOG_DEBUG("ILM is not enabled");
692 }
693
694 if ((memory->dlm_base != 0) && (memory->dlm_enable == true)) {
695 dlm_start = memory->dlm_start;
696 dlm_end = memory->dlm_end;
697
698 /* case 1, address < dlm_start */
699 if (address < dlm_start) {
700 if (dlm_start < address_end) {
701 /* update end_address to split non-DLM from DLM */
702 *end_address = dlm_start;
703 }
704 /* MEM mode */
705 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
706 } else if ((dlm_start <= address) && (address < dlm_end)) {
707 /* case 2, dlm_start <= address < dlm_end */
708 if (dlm_end < address_end) {
709 /* update end_address to split non-DLM from DLM */
710 *end_address = dlm_end;
711 }
712 /* DLM mode */
713 aice_memory_mode(aice, NDS_MEMORY_SELECT_DLM);
714 } else { /* case 3, dlm_end <= address */
715 /* MEM mode */
716 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
717 }
718
719 return ERROR_OK;
720 } else {
721 LOG_DEBUG("DLM is not enabled");
722 }
723
724 return ERROR_OK;
725 }
726
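/* Read an arbitrary byte range from the target. The range is split into an
 * unaligned head, a word-aligned body (bulk transfers when large enough) and a
 * tail, re-selecting the memory mode for every chunk. */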
727 int nds32_read_buffer(struct target *target, uint32_t address,
728 uint32_t size, uint8_t *buffer)
729 {
730 struct nds32 *nds32 = target_to_nds32(target);
731 struct nds32_memory *memory = &(nds32->memory);
732
733 if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
734 (target->state != TARGET_HALTED)) {
735 LOG_WARNING("target was not halted");
736 return ERROR_TARGET_NOT_HALTED;
737 }
738
739 LOG_DEBUG("READ BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
740 address,
741 size);
742
743 int retval = ERROR_OK;
744 struct aice_port_s *aice = target_to_aice(target);
745 uint32_t end_address;
746
747 if (((address % 2) == 0) && (size == 2)) {
748 nds32_select_memory_mode(target, address, 2, &end_address);
749 return aice_read_mem_unit(aice, address, 2, 1, buffer);
750 }
751
752 /* handle unaligned head bytes */
753 if (address % 4) {
754 uint32_t unaligned = 4 - (address % 4);
755
756 if (unaligned > size)
757 unaligned = size;
758
759 nds32_select_memory_mode(target, address, unaligned, &end_address);
760 retval = aice_read_mem_unit(aice, address, 1, unaligned, buffer);
761 if (retval != ERROR_OK)
762 return retval;
763
764 buffer += unaligned;
765 address += unaligned;
766 size -= unaligned;
767 }
768
769 /* handle aligned words */
770 if (size >= 4) {
771 int aligned = size - (size % 4);
772 int read_len;
773
774 do {
775 nds32_select_memory_mode(target, address, aligned, &end_address);
776
777 read_len = end_address - address;
778
779 if (read_len > 8)
780 retval = aice_read_mem_bulk(aice, address, read_len, buffer);
781 else
782 retval = aice_read_mem_unit(aice, address, 4, read_len / 4, buffer);
783
784 if (retval != ERROR_OK)
785 return retval;
786
787 buffer += read_len;
788 address += read_len;
789 size -= read_len;
790 aligned -= read_len;
791
792 } while (aligned != 0);
793 }
794
795 	/* prevent byte access when possible (avoid AHB access limitations in some cases) */
796 if (size >= 2) {
797 int aligned = size - (size % 2);
798 nds32_select_memory_mode(target, address, aligned, &end_address);
799 retval = aice_read_mem_unit(aice, address, 2, aligned / 2, buffer);
800 if (retval != ERROR_OK)
801 return retval;
802
803 buffer += aligned;
804 address += aligned;
805 size -= aligned;
806 }
807 	/* handle tail reads of less than 4 bytes */
808 if (size > 0) {
809 nds32_select_memory_mode(target, address, size, &end_address);
810 retval = aice_read_mem_unit(aice, address, 1, size, buffer);
811 if (retval != ERROR_OK)
812 return retval;
813 }
814
815 return ERROR_OK;
816 }
817
818 int nds32_read_memory(struct target *target, uint32_t address,
819 uint32_t size, uint32_t count, uint8_t *buffer)
820 {
821 struct aice_port_s *aice = target_to_aice(target);
822
823 return aice_read_mem_unit(aice, address, size, count, buffer);
824 }
825
826 int nds32_read_phys_memory(struct target *target, target_addr_t address,
827 uint32_t size, uint32_t count, uint8_t *buffer)
828 {
829 struct aice_port_s *aice = target_to_aice(target);
830 struct nds32 *nds32 = target_to_nds32(target);
831 struct nds32_memory *memory = &(nds32->memory);
832 enum nds_memory_access orig_channel;
833 int result;
834
835 /* switch to BUS access mode to skip MMU */
836 orig_channel = memory->access_channel;
837 memory->access_channel = NDS_MEMORY_ACC_BUS;
838 aice_memory_access(aice, memory->access_channel);
839
840 	/* The input address is a physical address; no address translation is needed. */
841 result = aice_read_mem_unit(aice, address, size, count, buffer);
842
843 	/* restore the original access mode */
844 memory->access_channel = orig_channel;
845 aice_memory_access(aice, memory->access_channel);
846
847 return result;
848 }
849
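/* Write an arbitrary byte range to the target; mirrors the chunking strategy
 * of nds32_read_buffer(). */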
850 int nds32_write_buffer(struct target *target, uint32_t address,
851 uint32_t size, const uint8_t *buffer)
852 {
853 struct nds32 *nds32 = target_to_nds32(target);
854 struct nds32_memory *memory = &(nds32->memory);
855
856 if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
857 (target->state != TARGET_HALTED)) {
858 LOG_WARNING("target was not halted");
859 return ERROR_TARGET_NOT_HALTED;
860 }
861
862 LOG_DEBUG("WRITE BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
863 address,
864 size);
865
866 struct aice_port_s *aice = target_to_aice(target);
867 int retval = ERROR_OK;
868 uint32_t end_address;
869
870 if (((address % 2) == 0) && (size == 2)) {
871 nds32_select_memory_mode(target, address, 2, &end_address);
872 return aice_write_mem_unit(aice, address, 2, 1, buffer);
873 }
874
875 /* handle unaligned head bytes */
876 if (address % 4) {
877 uint32_t unaligned = 4 - (address % 4);
878
879 if (unaligned > size)
880 unaligned = size;
881
882 nds32_select_memory_mode(target, address, unaligned, &end_address);
883 retval = aice_write_mem_unit(aice, address, 1, unaligned, buffer);
884 if (retval != ERROR_OK)
885 return retval;
886
887 buffer += unaligned;
888 address += unaligned;
889 size -= unaligned;
890 }
891
892 /* handle aligned words */
893 if (size >= 4) {
894 int aligned = size - (size % 4);
895 int write_len;
896
897 do {
898 nds32_select_memory_mode(target, address, aligned, &end_address);
899
900 write_len = end_address - address;
901 if (write_len > 8)
902 retval = aice_write_mem_bulk(aice, address, write_len, buffer);
903 else
904 retval = aice_write_mem_unit(aice, address, 4, write_len / 4, buffer);
905 if (retval != ERROR_OK)
906 return retval;
907
908 buffer += write_len;
909 address += write_len;
910 size -= write_len;
911 aligned -= write_len;
912
913 } while (aligned != 0);
914 }
915
916 /* handle tail writes of less than 4 bytes */
917 if (size > 0) {
918 nds32_select_memory_mode(target, address, size, &end_address);
919 retval = aice_write_mem_unit(aice, address, 1, size, buffer);
920 if (retval != ERROR_OK)
921 return retval;
922 }
923
924 return retval;
925 }
926
927 int nds32_write_memory(struct target *target, uint32_t address,
928 uint32_t size, uint32_t count, const uint8_t *buffer)
929 {
930 struct aice_port_s *aice = target_to_aice(target);
931
932 return aice_write_mem_unit(aice, address, size, count, buffer);
933 }
934
935 int nds32_write_phys_memory(struct target *target, target_addr_t address,
936 uint32_t size, uint32_t count, const uint8_t *buffer)
937 {
938 struct aice_port_s *aice = target_to_aice(target);
939 struct nds32 *nds32 = target_to_nds32(target);
940 struct nds32_memory *memory = &(nds32->memory);
941 enum nds_memory_access orig_channel;
942 int result;
943
944 /* switch to BUS access mode to skip MMU */
945 orig_channel = memory->access_channel;
946 memory->access_channel = NDS_MEMORY_ACC_BUS;
947 aice_memory_access(aice, memory->access_channel);
948
949 	/* The input address is a physical address; no address translation is needed. */
950 result = aice_write_mem_unit(aice, address, size, count, buffer);
951
952 	/* restore the original access mode */
953 memory->access_channel = orig_channel;
954 aice_memory_access(aice, memory->access_channel);
955
956 return result;
957 }
958
959 int nds32_mmu(struct target *target, int *enabled)
960 {
961 if (target->state != TARGET_HALTED) {
962 LOG_ERROR("%s: target not halted", __func__);
963 return ERROR_TARGET_INVALID;
964 }
965
966 struct nds32 *nds32 = target_to_nds32(target);
967 struct nds32_memory *memory = &(nds32->memory);
968 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
969
970 if ((mmu_config->memory_protection == 2) && (memory->address_translation == true))
971 *enabled = 1;
972 else
973 *enabled = 0;
974
975 return ERROR_OK;
976 }
977
978 int nds32_arch_state(struct target *target)
979 {
980 struct nds32 *nds32 = target_to_nds32(target);
981
982 if (nds32->common_magic != NDS32_COMMON_MAGIC) {
983 LOG_ERROR("BUG: called for a non-Andes target");
984 return ERROR_FAIL;
985 }
986
987 uint32_t value_pc, value_psw;
988
989 nds32_get_mapped_reg(nds32, PC, &value_pc);
990 nds32_get_mapped_reg(nds32, IR0, &value_psw);
991
992 LOG_USER("target halted due to %s\n"
993 "psw: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
994 debug_reason_name(target),
995 value_psw,
996 value_pc,
997 nds32->virtual_hosting ? ", virtual hosting" : "");
998
999 /* save pc value to pseudo register pc */
1000 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
1001 buf_set_u32(reg->value, 0, 32, value_pc);
1002
1003 return ERROR_OK;
1004 }
1005
1006 static void nds32_init_must_have_registers(struct nds32 *nds32)
1007 {
1008 struct reg_cache *reg_cache = nds32->core_cache;
1009
1010 /** MUST have general registers */
1011 ((struct nds32_reg *)reg_cache->reg_list[R0].arch_info)->enable = true;
1012 ((struct nds32_reg *)reg_cache->reg_list[R1].arch_info)->enable = true;
1013 ((struct nds32_reg *)reg_cache->reg_list[R2].arch_info)->enable = true;
1014 ((struct nds32_reg *)reg_cache->reg_list[R3].arch_info)->enable = true;
1015 ((struct nds32_reg *)reg_cache->reg_list[R4].arch_info)->enable = true;
1016 ((struct nds32_reg *)reg_cache->reg_list[R5].arch_info)->enable = true;
1017 ((struct nds32_reg *)reg_cache->reg_list[R6].arch_info)->enable = true;
1018 ((struct nds32_reg *)reg_cache->reg_list[R7].arch_info)->enable = true;
1019 ((struct nds32_reg *)reg_cache->reg_list[R8].arch_info)->enable = true;
1020 ((struct nds32_reg *)reg_cache->reg_list[R9].arch_info)->enable = true;
1021 ((struct nds32_reg *)reg_cache->reg_list[R10].arch_info)->enable = true;
1022 ((struct nds32_reg *)reg_cache->reg_list[R15].arch_info)->enable = true;
1023 ((struct nds32_reg *)reg_cache->reg_list[R28].arch_info)->enable = true;
1024 ((struct nds32_reg *)reg_cache->reg_list[R29].arch_info)->enable = true;
1025 ((struct nds32_reg *)reg_cache->reg_list[R30].arch_info)->enable = true;
1026 ((struct nds32_reg *)reg_cache->reg_list[R31].arch_info)->enable = true;
1027 ((struct nds32_reg *)reg_cache->reg_list[PC].arch_info)->enable = true;
1028
1029 /** MUST have configuration system registers */
1030 ((struct nds32_reg *)reg_cache->reg_list[CR0].arch_info)->enable = true;
1031 ((struct nds32_reg *)reg_cache->reg_list[CR1].arch_info)->enable = true;
1032 ((struct nds32_reg *)reg_cache->reg_list[CR2].arch_info)->enable = true;
1033 ((struct nds32_reg *)reg_cache->reg_list[CR3].arch_info)->enable = true;
1034 ((struct nds32_reg *)reg_cache->reg_list[CR4].arch_info)->enable = true;
1035
1036 /** MUST have interrupt system registers */
1037 ((struct nds32_reg *)reg_cache->reg_list[IR0].arch_info)->enable = true;
1038 ((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = true;
1039 ((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = true;
1040 ((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = true;
1041 ((struct nds32_reg *)reg_cache->reg_list[IR6].arch_info)->enable = true;
1042 ((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = true;
1043 ((struct nds32_reg *)reg_cache->reg_list[IR11].arch_info)->enable = true;
1044 ((struct nds32_reg *)reg_cache->reg_list[IR14].arch_info)->enable = true;
1045 ((struct nds32_reg *)reg_cache->reg_list[IR15].arch_info)->enable = true;
1046
1047 /** MUST have MMU system registers */
1048 ((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = true;
1049
1050 /** MUST have EDM system registers */
1051 ((struct nds32_reg *)reg_cache->reg_list[DR40].arch_info)->enable = true;
1052 ((struct nds32_reg *)reg_cache->reg_list[DR42].arch_info)->enable = true;
1053 }
1054
1055 static int nds32_init_memory_config(struct nds32 *nds32)
1056 {
1057 uint32_t value_cr1; /* ICM_CFG */
1058 uint32_t value_cr2; /* DCM_CFG */
1059 struct nds32_memory *memory = &(nds32->memory);
1060
1061 /* read $cr1 to init instruction memory information */
1062 nds32_get_mapped_reg(nds32, CR1, &value_cr1);
1063 memory->icache.set = value_cr1 & 0x7;
1064 memory->icache.way = (value_cr1 >> 3) & 0x7;
1065 memory->icache.line_size = (value_cr1 >> 6) & 0x7;
1066 memory->icache.lock_support = (value_cr1 >> 9) & 0x1;
1067
1068 memory->ilm_base = (value_cr1 >> 10) & 0x7;
1069 memory->ilm_align_ver = (value_cr1 >> 13) & 0x3;
1070
1071 /* read $cr2 to init data memory information */
1072 nds32_get_mapped_reg(nds32, CR2, &value_cr2);
1073 memory->dcache.set = value_cr2 & 0x7;
1074 memory->dcache.way = (value_cr2 >> 3) & 0x7;
1075 memory->dcache.line_size = (value_cr2 >> 6) & 0x7;
1076 memory->dcache.lock_support = (value_cr2 >> 9) & 0x1;
1077
1078 memory->dlm_base = (value_cr2 >> 10) & 0x7;
1079 memory->dlm_align_ver = (value_cr2 >> 13) & 0x3;
1080
1081 return ERROR_OK;
1082 }
1083
1084 static void nds32_init_config(struct nds32 *nds32)
1085 {
1086 uint32_t value_cr0;
1087 uint32_t value_cr3;
1088 uint32_t value_cr4;
1089 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
1090 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
1091 struct nds32_misc_config *misc_config = &(nds32->misc_config);
1092
1093 nds32_get_mapped_reg(nds32, CR0, &value_cr0);
1094 nds32_get_mapped_reg(nds32, CR3, &value_cr3);
1095 nds32_get_mapped_reg(nds32, CR4, &value_cr4);
1096
1097 /* config cpu version */
1098 cpu_version->performance_extension = value_cr0 & 0x1;
1099 cpu_version->_16bit_extension = (value_cr0 >> 1) & 0x1;
1100 cpu_version->performance_extension_2 = (value_cr0 >> 2) & 0x1;
1101 cpu_version->cop_fpu_extension = (value_cr0 >> 3) & 0x1;
1102 cpu_version->string_extension = (value_cr0 >> 4) & 0x1;
1103 cpu_version->revision = (value_cr0 >> 16) & 0xFF;
1104 cpu_version->cpu_id_family = (value_cr0 >> 24) & 0xF;
1105 cpu_version->cpu_id_version = (value_cr0 >> 28) & 0xF;
1106
1107 /* config MMU */
1108 mmu_config->memory_protection = value_cr3 & 0x3;
1109 mmu_config->memory_protection_version = (value_cr3 >> 2) & 0x1F;
1110 mmu_config->fully_associative_tlb = (value_cr3 >> 7) & 0x1;
1111 if (mmu_config->fully_associative_tlb) {
1112 mmu_config->tlb_size = (value_cr3 >> 8) & 0x7F;
1113 } else {
1114 mmu_config->tlb_ways = (value_cr3 >> 8) & 0x7;
1115 mmu_config->tlb_sets = (value_cr3 >> 11) & 0x7;
1116 }
1117 mmu_config->_8k_page_support = (value_cr3 >> 15) & 0x1;
1118 mmu_config->extra_page_size_support = (value_cr3 >> 16) & 0xFF;
1119 mmu_config->tlb_lock = (value_cr3 >> 24) & 0x1;
1120 mmu_config->hardware_page_table_walker = (value_cr3 >> 25) & 0x1;
1121 mmu_config->default_endian = (value_cr3 >> 26) & 0x1;
1122 mmu_config->partition_num = (value_cr3 >> 27) & 0x1;
1123 mmu_config->invisible_tlb = (value_cr3 >> 28) & 0x1;
1124 mmu_config->vlpt = (value_cr3 >> 29) & 0x1;
1125 mmu_config->ntme = (value_cr3 >> 30) & 0x1;
1126 mmu_config->drde = (value_cr3 >> 31) & 0x1;
1127
1128 /* config misc */
1129 misc_config->edm = value_cr4 & 0x1;
1130 misc_config->local_memory_dma = (value_cr4 >> 1) & 0x1;
1131 misc_config->performance_monitor = (value_cr4 >> 2) & 0x1;
1132 misc_config->high_speed_memory_port = (value_cr4 >> 3) & 0x1;
1133 misc_config->debug_tracer = (value_cr4 >> 4) & 0x1;
1134 misc_config->div_instruction = (value_cr4 >> 5) & 0x1;
1135 misc_config->mac_instruction = (value_cr4 >> 6) & 0x1;
1136 misc_config->audio_isa = (value_cr4 >> 7) & 0x3;
1137 misc_config->L2_cache = (value_cr4 >> 9) & 0x1;
1138 misc_config->reduce_register = (value_cr4 >> 10) & 0x1;
1139 misc_config->addr_24 = (value_cr4 >> 11) & 0x1;
1140 misc_config->interruption_level = (value_cr4 >> 12) & 0x1;
1141 misc_config->baseline_instruction = (value_cr4 >> 13) & 0x7;
1142 misc_config->no_dx_register = (value_cr4 >> 16) & 0x1;
1143 misc_config->implement_dependant_register = (value_cr4 >> 17) & 0x1;
1144 misc_config->implement_dependant_sr_encoding = (value_cr4 >> 18) & 0x1;
1145 misc_config->ifc = (value_cr4 >> 19) & 0x1;
1146 misc_config->mcu = (value_cr4 >> 20) & 0x1;
1147 misc_config->shadow = (value_cr4 >> 21) & 0x7;
1148 misc_config->ex9 = (value_cr4 >> 24) & 0x1;
1149
1150 nds32_init_memory_config(nds32);
1151 }
1152
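/* Enable the optional system registers according to the configuration read by
 * nds32_init_config(). Family-specific quirks are expressed through the
 * no_cr5/mr10_exist/no_racr0 flags. */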
1153 static int nds32_init_option_registers(struct nds32 *nds32)
1154 {
1155 struct reg_cache *reg_cache = nds32->core_cache;
1156 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
1157 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
1158 struct nds32_misc_config *misc_config = &(nds32->misc_config);
1159 struct nds32_memory *memory_config = &(nds32->memory);
1160
1161 bool no_cr5;
1162 bool mr10_exist;
1163 bool no_racr0;
1164
1165 if (((cpu_version->cpu_id_family == 0xC) || (cpu_version->cpu_id_family == 0xD)) &&
1166 ((cpu_version->revision & 0xFC) == 0)) {
1167 no_cr5 = true;
1168 mr10_exist = true;
1169 no_racr0 = true;
1170 } else {
1171 no_cr5 = false;
1172 mr10_exist = false;
1173 no_racr0 = false;
1174 }
1175
1176 if (misc_config->reduce_register == false) {
1177 ((struct nds32_reg *)reg_cache->reg_list[R11].arch_info)->enable = true;
1178 ((struct nds32_reg *)reg_cache->reg_list[R12].arch_info)->enable = true;
1179 ((struct nds32_reg *)reg_cache->reg_list[R13].arch_info)->enable = true;
1180 ((struct nds32_reg *)reg_cache->reg_list[R14].arch_info)->enable = true;
1181 ((struct nds32_reg *)reg_cache->reg_list[R16].arch_info)->enable = true;
1182 ((struct nds32_reg *)reg_cache->reg_list[R17].arch_info)->enable = true;
1183 ((struct nds32_reg *)reg_cache->reg_list[R18].arch_info)->enable = true;
1184 ((struct nds32_reg *)reg_cache->reg_list[R19].arch_info)->enable = true;
1185 ((struct nds32_reg *)reg_cache->reg_list[R20].arch_info)->enable = true;
1186 ((struct nds32_reg *)reg_cache->reg_list[R21].arch_info)->enable = true;
1187 ((struct nds32_reg *)reg_cache->reg_list[R22].arch_info)->enable = true;
1188 ((struct nds32_reg *)reg_cache->reg_list[R23].arch_info)->enable = true;
1189 ((struct nds32_reg *)reg_cache->reg_list[R24].arch_info)->enable = true;
1190 ((struct nds32_reg *)reg_cache->reg_list[R25].arch_info)->enable = true;
1191 ((struct nds32_reg *)reg_cache->reg_list[R26].arch_info)->enable = true;
1192 ((struct nds32_reg *)reg_cache->reg_list[R27].arch_info)->enable = true;
1193 }
1194
1195 if (misc_config->no_dx_register == false) {
1196 ((struct nds32_reg *)reg_cache->reg_list[D0LO].arch_info)->enable = true;
1197 ((struct nds32_reg *)reg_cache->reg_list[D0HI].arch_info)->enable = true;
1198 ((struct nds32_reg *)reg_cache->reg_list[D1LO].arch_info)->enable = true;
1199 ((struct nds32_reg *)reg_cache->reg_list[D1HI].arch_info)->enable = true;
1200 }
1201
1202 if (misc_config->ex9)
1203 ((struct nds32_reg *)reg_cache->reg_list[ITB].arch_info)->enable = true;
1204
1205 if (no_cr5 == false)
1206 ((struct nds32_reg *)reg_cache->reg_list[CR5].arch_info)->enable = true;
1207
1208 if (cpu_version->cop_fpu_extension) {
1209 ((struct nds32_reg *)reg_cache->reg_list[CR6].arch_info)->enable = true;
1210 ((struct nds32_reg *)reg_cache->reg_list[FPCSR].arch_info)->enable = true;
1211 ((struct nds32_reg *)reg_cache->reg_list[FPCFG].arch_info)->enable = true;
1212 }
1213
1214 if (mmu_config->memory_protection == 1) {
1215 /* Secure MPU has no IPC, IPSW, P_ITYPE */
1216 ((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = false;
1217 ((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = false;
1218 }
1219
1220 if (nds32->privilege_level != 0)
1221 ((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = false;
1222
1223 if (misc_config->mcu == true)
1224 ((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = false;
1225
1226 if (misc_config->interruption_level == false) {
1227 ((struct nds32_reg *)reg_cache->reg_list[IR2].arch_info)->enable = true;
1228 ((struct nds32_reg *)reg_cache->reg_list[IR5].arch_info)->enable = true;
1229 ((struct nds32_reg *)reg_cache->reg_list[IR10].arch_info)->enable = true;
1230 ((struct nds32_reg *)reg_cache->reg_list[IR12].arch_info)->enable = true;
1231 ((struct nds32_reg *)reg_cache->reg_list[IR13].arch_info)->enable = true;
1232
1233 /* Secure MPU has no IPC, IPSW, P_ITYPE */
1234 if (mmu_config->memory_protection != 1)
1235 ((struct nds32_reg *)reg_cache->reg_list[IR7].arch_info)->enable = true;
1236 }
1237
1238 if ((cpu_version->cpu_id_family == 0x9) ||
1239 (cpu_version->cpu_id_family == 0xA) ||
1240 (cpu_version->cpu_id_family == 0xC) ||
1241 (cpu_version->cpu_id_family == 0xD))
1242 ((struct nds32_reg *)reg_cache->reg_list[IR8].arch_info)->enable = true;
1243
1244 if (misc_config->shadow == 1) {
1245 ((struct nds32_reg *)reg_cache->reg_list[IR16].arch_info)->enable = true;
1246 ((struct nds32_reg *)reg_cache->reg_list[IR17].arch_info)->enable = true;
1247 }
1248
1249 if (misc_config->ifc)
1250 ((struct nds32_reg *)reg_cache->reg_list[IFC_LP].arch_info)->enable = true;
1251
1252 if (nds32->privilege_level != 0)
1253 ((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = false;
1254
1255 if (mmu_config->memory_protection == 1) {
1256 if (mmu_config->memory_protection_version == 24)
1257 ((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;
1258
1259 if (nds32->privilege_level == 0) {
1260 if ((mmu_config->memory_protection_version == 16) ||
1261 (mmu_config->memory_protection_version == 24)) {
1262 ((struct nds32_reg *)reg_cache->reg_list[MR11].arch_info)->enable = true;
1263 ((struct nds32_reg *)reg_cache->reg_list[SECUR0].arch_info)->enable = true;
1264 ((struct nds32_reg *)reg_cache->reg_list[IR20].arch_info)->enable = true;
1265 ((struct nds32_reg *)reg_cache->reg_list[IR22].arch_info)->enable = true;
1266 ((struct nds32_reg *)reg_cache->reg_list[IR24].arch_info)->enable = true;
1267 ((struct nds32_reg *)reg_cache->reg_list[IR30].arch_info)->enable = true;
1268
1269 if (misc_config->shadow == 1) {
1270 ((struct nds32_reg *)reg_cache->reg_list[IR21].arch_info)->enable = true;
1271 ((struct nds32_reg *)reg_cache->reg_list[IR23].arch_info)->enable = true;
1272 ((struct nds32_reg *)reg_cache->reg_list[IR25].arch_info)->enable = true;
1273 }
1274 }
1275 }
1276 } else if (mmu_config->memory_protection == 2) {
1277 ((struct nds32_reg *)reg_cache->reg_list[MR1].arch_info)->enable = true;
1278 ((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;
1279
1280 if ((cpu_version->cpu_id_family != 0xA) && (cpu_version->cpu_id_family != 0xC) &&
1281 (cpu_version->cpu_id_family != 0xD))
1282 ((struct nds32_reg *)reg_cache->reg_list[MR5].arch_info)->enable = true;
1283 }
1284
1285 if (mmu_config->memory_protection > 0) {
1286 ((struct nds32_reg *)reg_cache->reg_list[MR2].arch_info)->enable = true;
1287 ((struct nds32_reg *)reg_cache->reg_list[MR3].arch_info)->enable = true;
1288 }
1289
1290 if (memory_config->ilm_base != 0)
1291 if (nds32->privilege_level == 0)
1292 ((struct nds32_reg *)reg_cache->reg_list[MR6].arch_info)->enable = true;
1293
1294 if (memory_config->dlm_base != 0)
1295 if (nds32->privilege_level == 0)
1296 ((struct nds32_reg *)reg_cache->reg_list[MR7].arch_info)->enable = true;
1297
1298 if ((memory_config->icache.line_size != 0) && (memory_config->dcache.line_size != 0))
1299 ((struct nds32_reg *)reg_cache->reg_list[MR8].arch_info)->enable = true;
1300
1301 if (misc_config->high_speed_memory_port)
1302 ((struct nds32_reg *)reg_cache->reg_list[MR9].arch_info)->enable = true;
1303
1304 if (mr10_exist)
1305 ((struct nds32_reg *)reg_cache->reg_list[MR10].arch_info)->enable = true;
1306
1307 if (misc_config->edm) {
1308 int dr_reg_n = nds32->edm.breakpoint_num * 5;
1309
1310 for (int i = 0 ; i < dr_reg_n ; i++)
1311 ((struct nds32_reg *)reg_cache->reg_list[DR0 + i].arch_info)->enable = true;
1312
1313 ((struct nds32_reg *)reg_cache->reg_list[DR41].arch_info)->enable = true;
1314 ((struct nds32_reg *)reg_cache->reg_list[DR43].arch_info)->enable = true;
1315 ((struct nds32_reg *)reg_cache->reg_list[DR44].arch_info)->enable = true;
1316 ((struct nds32_reg *)reg_cache->reg_list[DR45].arch_info)->enable = true;
1317 }
1318
1319 if (misc_config->debug_tracer) {
1320 ((struct nds32_reg *)reg_cache->reg_list[DR46].arch_info)->enable = true;
1321 ((struct nds32_reg *)reg_cache->reg_list[DR47].arch_info)->enable = true;
1322 }
1323
1324 if (misc_config->performance_monitor) {
1325 ((struct nds32_reg *)reg_cache->reg_list[PFR0].arch_info)->enable = true;
1326 ((struct nds32_reg *)reg_cache->reg_list[PFR1].arch_info)->enable = true;
1327 ((struct nds32_reg *)reg_cache->reg_list[PFR2].arch_info)->enable = true;
1328 ((struct nds32_reg *)reg_cache->reg_list[PFR3].arch_info)->enable = true;
1329 }
1330
1331 if (misc_config->local_memory_dma) {
1332 ((struct nds32_reg *)reg_cache->reg_list[DMAR0].arch_info)->enable = true;
1333 ((struct nds32_reg *)reg_cache->reg_list[DMAR1].arch_info)->enable = true;
1334 ((struct nds32_reg *)reg_cache->reg_list[DMAR2].arch_info)->enable = true;
1335 ((struct nds32_reg *)reg_cache->reg_list[DMAR3].arch_info)->enable = true;
1336 ((struct nds32_reg *)reg_cache->reg_list[DMAR4].arch_info)->enable = true;
1337 ((struct nds32_reg *)reg_cache->reg_list[DMAR5].arch_info)->enable = true;
1338 ((struct nds32_reg *)reg_cache->reg_list[DMAR6].arch_info)->enable = true;
1339 ((struct nds32_reg *)reg_cache->reg_list[DMAR7].arch_info)->enable = true;
1340 ((struct nds32_reg *)reg_cache->reg_list[DMAR8].arch_info)->enable = true;
1341 ((struct nds32_reg *)reg_cache->reg_list[DMAR9].arch_info)->enable = true;
1342 ((struct nds32_reg *)reg_cache->reg_list[DMAR10].arch_info)->enable = true;
1343 }
1344
1345 if ((misc_config->local_memory_dma || misc_config->performance_monitor) &&
1346 (no_racr0 == false))
1347 ((struct nds32_reg *)reg_cache->reg_list[RACR].arch_info)->enable = true;
1348
1349 if (cpu_version->cop_fpu_extension || (misc_config->audio_isa != 0))
1350 ((struct nds32_reg *)reg_cache->reg_list[FUCPR].arch_info)->enable = true;
1351
1352 if (misc_config->audio_isa != 0) {
1353 if (misc_config->audio_isa > 1) {
1354 ((struct nds32_reg *)reg_cache->reg_list[D0L24].arch_info)->enable = true;
1355 ((struct nds32_reg *)reg_cache->reg_list[D1L24].arch_info)->enable = true;
1356 }
1357
1358 ((struct nds32_reg *)reg_cache->reg_list[I0].arch_info)->enable = true;
1359 ((struct nds32_reg *)reg_cache->reg_list[I1].arch_info)->enable = true;
1360 ((struct nds32_reg *)reg_cache->reg_list[I2].arch_info)->enable = true;
1361 ((struct nds32_reg *)reg_cache->reg_list[I3].arch_info)->enable = true;
1362 ((struct nds32_reg *)reg_cache->reg_list[I4].arch_info)->enable = true;
1363 ((struct nds32_reg *)reg_cache->reg_list[I5].arch_info)->enable = true;
1364 ((struct nds32_reg *)reg_cache->reg_list[I6].arch_info)->enable = true;
1365 ((struct nds32_reg *)reg_cache->reg_list[I7].arch_info)->enable = true;
1366 ((struct nds32_reg *)reg_cache->reg_list[M1].arch_info)->enable = true;
1367 ((struct nds32_reg *)reg_cache->reg_list[M2].arch_info)->enable = true;
1368 ((struct nds32_reg *)reg_cache->reg_list[M3].arch_info)->enable = true;
1369 ((struct nds32_reg *)reg_cache->reg_list[M5].arch_info)->enable = true;
1370 ((struct nds32_reg *)reg_cache->reg_list[M6].arch_info)->enable = true;
1371 ((struct nds32_reg *)reg_cache->reg_list[M7].arch_info)->enable = true;
1372 ((struct nds32_reg *)reg_cache->reg_list[MOD].arch_info)->enable = true;
1373 ((struct nds32_reg *)reg_cache->reg_list[LBE].arch_info)->enable = true;
1374 ((struct nds32_reg *)reg_cache->reg_list[LE].arch_info)->enable = true;
1375 ((struct nds32_reg *)reg_cache->reg_list[LC].arch_info)->enable = true;
1376 ((struct nds32_reg *)reg_cache->reg_list[ADM_VBASE].arch_info)->enable = true;
1377 ((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL0].arch_info)->enable = true;
1378 ((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL1].arch_info)->enable = true;
1379
1380 uint32_t value_mod;
1381 uint32_t fucpr_backup;
1382 			/* temporarily enable the audio unit to read its configuration */
1383 nds32_get_mapped_reg(nds32, FUCPR, &fucpr_backup);
1384 if ((fucpr_backup & 0x80000000) == 0)
1385 nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup | 0x80000000);
1386 nds32_get_mapped_reg(nds32, MOD, &value_mod);
1387 			/* restore the original FUCPR value */
1388 if ((fucpr_backup & 0x80000000) == 0)
1389 nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup);
1390
1391 if ((value_mod >> 6) & 0x1) {
1392 ((struct nds32_reg *)reg_cache->reg_list[CB_CTL].arch_info)->enable = true;
1393 ((struct nds32_reg *)reg_cache->reg_list[CBB0].arch_info)->enable = true;
1394 ((struct nds32_reg *)reg_cache->reg_list[CBB1].arch_info)->enable = true;
1395 ((struct nds32_reg *)reg_cache->reg_list[CBB2].arch_info)->enable = true;
1396 ((struct nds32_reg *)reg_cache->reg_list[CBB3].arch_info)->enable = true;
1397 ((struct nds32_reg *)reg_cache->reg_list[CBE0].arch_info)->enable = true;
1398 ((struct nds32_reg *)reg_cache->reg_list[CBE1].arch_info)->enable = true;
1399 ((struct nds32_reg *)reg_cache->reg_list[CBE2].arch_info)->enable = true;
1400 ((struct nds32_reg *)reg_cache->reg_list[CBE3].arch_info)->enable = true;
1401 }
1402 }
1403
1404 if ((cpu_version->cpu_id_family == 0x9) ||
1405 (cpu_version->cpu_id_family == 0xA) ||
1406 (cpu_version->cpu_id_family == 0xC)) {
1407
1408 ((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = true;
1409 ((struct nds32_reg *)reg_cache->reg_list[IDR1].arch_info)->enable = true;
1410
1411 if ((cpu_version->cpu_id_family == 0xC) && (cpu_version->revision == 0x0C))
1412 ((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = false;
1413 }
1414
1415 uint32_t ir3_value;
1416 uint32_t ivb_prog_pri_lvl;
1417 uint32_t ivb_ivic_ver;
1418
1419 nds32_get_mapped_reg(nds32, IR3, &ir3_value);
1420 ivb_prog_pri_lvl = ir3_value & 0x1;
1421 ivb_ivic_ver = (ir3_value >> 11) & 0x3;
1422
1423 if ((ivb_prog_pri_lvl == 1) || (ivb_ivic_ver >= 1)) {
1424 ((struct nds32_reg *)reg_cache->reg_list[IR18].arch_info)->enable = true;
1425 ((struct nds32_reg *)reg_cache->reg_list[IR19].arch_info)->enable = true;
1426 }
1427
1428 if (ivb_ivic_ver >= 1) {
1429 ((struct nds32_reg *)reg_cache->reg_list[IR26].arch_info)->enable = true;
1430 ((struct nds32_reg *)reg_cache->reg_list[IR27].arch_info)->enable = true;
1431 ((struct nds32_reg *)reg_cache->reg_list[IR28].arch_info)->enable = true;
1432 ((struct nds32_reg *)reg_cache->reg_list[IR29].arch_info)->enable = true;
1433 }
1434
1435 return ERROR_OK;
1436 }
1437
1438 int nds32_init_register_table(struct nds32 *nds32)
1439 {
1440 nds32_init_must_have_registers(nds32);
1441
1442 return ERROR_OK;
1443 }
1444
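/* Plant a software breakpoint: inspect the first byte of the original
 * (big-endian) instruction to decide between a 16-bit and a 32-bit opcode,
 * save the original bytes, patch in the matching BREAK instruction, sync the
 * caches and verify the patch by reading it back. */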
1445 int nds32_add_software_breakpoint(struct target *target,
1446 struct breakpoint *breakpoint)
1447 {
1448 uint32_t data;
1449 uint32_t check_data;
1450 uint32_t break_insn;
1451
1452 /* check the breakpoint size */
1453 target->type->read_buffer(target, breakpoint->address, 4, (uint8_t *)&data);
1454
1455 	/* back up the original instruction;
1456 	 * the instruction stream is big-endian */
1457 	if (*(char *)&data & 0x80) { /* 16-bit instruction */
1458 		breakpoint->length = 2;
1459 		break_insn = NDS32_BREAK_16;
1460 	} else { /* 32-bit instruction */
1461 breakpoint->length = 4;
1462 break_insn = NDS32_BREAK_32;
1463 }
1464
1465 free(breakpoint->orig_instr);
1466
1467 breakpoint->orig_instr = malloc(breakpoint->length);
1468 memcpy(breakpoint->orig_instr, &data, breakpoint->length);
1469
1470 	/* patch the break instruction into target memory */
1471 target->type->write_buffer(target, breakpoint->address, breakpoint->length, (const uint8_t *)&break_insn);
1472 /* write_back & invalidate dcache & invalidate icache */
1473 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1474
1475 /* read back to check */
1476 target->type->read_buffer(target, breakpoint->address, breakpoint->length, (uint8_t *)&check_data);
1477 if (memcmp(&check_data, &break_insn, breakpoint->length) == 0)
1478 return ERROR_OK;
1479
1480 return ERROR_FAIL;
1481 }
1482
1483 int nds32_remove_software_breakpoint(struct target *target,
1484 struct breakpoint *breakpoint)
1485 {
1486 uint32_t check_data;
1487 uint32_t break_insn;
1488
1489 if (breakpoint->length == 2)
1490 break_insn = NDS32_BREAK_16;
1491 else if (breakpoint->length == 4)
1492 break_insn = NDS32_BREAK_32;
1493 else
1494 return ERROR_FAIL;
1495
1496 target->type->read_buffer(target, breakpoint->address, breakpoint->length,
1497 (uint8_t *)&check_data);
1498
1499 /* break instruction is modified */
1500 if (memcmp(&check_data, &break_insn, breakpoint->length) != 0)
1501 return ERROR_FAIL;
1502
1503 	/* restore the original instruction in target memory */
1504 target->type->write_buffer(target, breakpoint->address, breakpoint->length,
1505 breakpoint->orig_instr);
1506
1507 /* write_back & invalidate dcache & invalidate icache */
1508 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1509
1510 return ERROR_OK;
1511 }
1512
1513 /**
1514 * Restore the processor context on an Andes target. The full processor
1515 * context is analyzed to see if any of the registers are dirty on this end, but
1516 * have a valid new value. If this is the case, the processor is changed to the
1517 * appropriate mode and the new register values are written out to the
1518 * processor. If there happens to be a dirty register with an invalid value, an
1519 * error will be logged.
1520 *
1521 * @param target Pointer to the Andes target to have its context restored
1522 * @return Error status if the target is not halted.
1523 */
1524 int nds32_restore_context(struct target *target)
1525 {
1526 struct nds32 *nds32 = target_to_nds32(target);
1527 struct aice_port_s *aice = target_to_aice(target);
1528 struct reg_cache *reg_cache = nds32->core_cache;
1529 struct reg *reg;
1530 struct nds32_reg *reg_arch_info;
1531 unsigned int i;
1532
1533 LOG_DEBUG("-");
1534
1535 if (target->state != TARGET_HALTED) {
1536 LOG_WARNING("target not halted");
1537 return ERROR_TARGET_NOT_HALTED;
1538 }
1539
1540 /* check if there are dirty registers */
1541 for (i = 0; i < reg_cache->num_regs; i++) {
1542 reg = &(reg_cache->reg_list[i]);
1543 if (reg->dirty == true) {
1544 if (reg->valid == true) {
1545
1546 LOG_DEBUG("examining dirty reg: %s", reg->name);
1547 LOG_DEBUG("writing register %d with value 0x%8.8" PRIx32,
1548 i, buf_get_u32(reg->value, 0, 32));
1549
1550 reg_arch_info = reg->arch_info;
1551 if (FD0 <= reg_arch_info->num && reg_arch_info->num <= FD31) {
1552 uint64_t val = buf_get_u64(reg_arch_info->value, 0, 64);
1553 aice_write_reg_64(aice, reg_arch_info->num, val);
1554 } else {
1555 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
1556 aice_write_register(aice, reg_arch_info->num, val);
1557 }
1558
1559 reg->valid = true;
1560 reg->dirty = false;
1561 }
1562 }
1563 }
1564
1565 return ERROR_OK;
1566 }
1567
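/* Read the EDM configuration: version, number of hardware breakpoints, and
 * whether ACC_CTL, direct local-memory access (DALM) and MAX_STOP are
 * supported. Finally, log in to secure targets with the EDM passcode. */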
1568 int nds32_edm_config(struct nds32 *nds32)
1569 {
1570 struct target *target = nds32->target;
1571 struct aice_port_s *aice = target_to_aice(target);
1572 uint32_t edm_cfg;
1573 uint32_t edm_ctl;
1574
1575 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CFG, &edm_cfg);
1576
1577 nds32->edm.version = (edm_cfg >> 16) & 0xFFFF;
1578 LOG_INFO("EDM version 0x%04x", nds32->edm.version);
1579
1580 nds32->edm.breakpoint_num = (edm_cfg & 0x7) + 1;
1581
1582 if ((nds32->edm.version & 0x1000) || (0x60 <= nds32->edm.version))
1583 nds32->edm.access_control = true;
1584 else
1585 nds32->edm.access_control = false;
1586
1587 if ((edm_cfg >> 4) & 0x1)
1588 nds32->edm.direct_access_local_memory = true;
1589 else
1590 nds32->edm.direct_access_local_memory = false;
1591
1592 if (nds32->edm.version <= 0x20)
1593 nds32->edm.direct_access_local_memory = false;
1594
1595 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &edm_ctl);
1596 if (edm_ctl & (0x1 << 29))
1597 nds32->edm.support_max_stop = true;
1598 else
1599 nds32->edm.support_max_stop = false;
1600
1601 /* set passcode for secure MCU */
1602 nds32_login(nds32);
1603
1604 return ERROR_OK;
1605 }
1606
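/* Derive the per-target configuration after halt: parse the config registers,
 * enable the optional registers, compute the maximum interrupt level and read
 * the ILM/DLM sizes from $mr6/$mr7. */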
1607 int nds32_config(struct nds32 *nds32)
1608 {
1609 nds32_init_config(nds32);
1610
1611 /* init optional system registers according to config registers */
1612 nds32_init_option_registers(nds32);
1613
1614 /* get max interrupt level */
1615 if (nds32->misc_config.interruption_level)
1616 nds32->max_interrupt_level = 2;
1617 else
1618 nds32->max_interrupt_level = 3;
1619
1620 /* get ILM/DLM size from MR6/MR7 */
1621 uint32_t value_mr6, value_mr7;
1622 uint32_t size_index;
1623 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
1624 size_index = (value_mr6 >> 1) & 0xF;
1625 nds32->memory.ilm_size = NDS32_LM_SIZE_TABLE[size_index];
1626
1627 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
1628 size_index = (value_mr7 >> 1) & 0xF;
1629 nds32->memory.dlm_size = NDS32_LM_SIZE_TABLE[size_index];
1630
1631 return ERROR_OK;
1632 }
1633
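/**
 * Initialize the common nds32 architecture state for a newly created
 * target: set default option values, prepare the virtual-hosting syscall
 * breakpoint, and build the register cache and register table.
 */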
1634 int nds32_init_arch_info(struct target *target, struct nds32 *nds32)
1635 {
1636 target->arch_info = nds32;
1637 nds32->target = target;
1638
1639 nds32->common_magic = NDS32_COMMON_MAGIC;
1640 nds32->init_arch_info_after_halted = false;
1641 nds32->auto_convert_hw_bp = true;
1642 nds32->global_stop = false;
1643 nds32->soft_reset_halt = false;
1644 nds32->edm_passcode = NULL;
1645 nds32->privilege_level = 0;
1646 nds32->boot_time = 1500;
1647 nds32->reset_halt_as_examine = false;
1648 nds32->keep_target_edm_ctl = false;
1649 nds32->word_access_mem = false;
1650 nds32->virtual_hosting = true;
1651 nds32->hit_syscall = false;
1652 nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;
1653 nds32->virtual_hosting_errno = 0;
1654 nds32->virtual_hosting_ctrl_c = false;
1655 nds32->attached = false;
1656
1657 nds32->syscall_break.asid = 0;
1658 nds32->syscall_break.length = 4;
1659 nds32->syscall_break.set = 0;
1660 nds32->syscall_break.orig_instr = NULL;
1661 nds32->syscall_break.next = NULL;
1662 nds32->syscall_break.unique_id = 0x515CAll + target->target_number;
1663 nds32->syscall_break.linked_brp = 0;
1664
1665 nds32_reg_init();
1666
1667 if (ERROR_FAIL == nds32_reg_cache_init(target, nds32))
1668 return ERROR_FAIL;
1669
1670 if (ERROR_OK != nds32_init_register_table(nds32))
1671 return ERROR_FAIL;
1672
1673 return ERROR_OK;
1674 }
1675
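/**
 * Translate a virtual address into a physical address. If address
 * translation is disabled the address is returned unchanged; otherwise the
 * TLB is probed first and the page table is walked as a fallback.
 */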
1676 int nds32_virtual_to_physical(struct target *target, target_addr_t address, target_addr_t *physical)
1677 {
1678 struct nds32 *nds32 = target_to_nds32(target);
1679
1680 if (nds32->memory.address_translation == false) {
1681 *physical = address;
1682 return ERROR_OK;
1683 }
1684
1685 if (ERROR_OK == nds32_probe_tlb(nds32, address, physical))
1686 return ERROR_OK;
1687
1688 if (ERROR_OK == nds32_walk_page_table(nds32, address, physical))
1689 return ERROR_OK;
1690
1691 return ERROR_FAIL;
1692 }
1693
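/**
 * Make memory written through the debugger visible to the core: write back
 * and invalidate every D-cache line covering [address, address + length)
 * and invalidate the corresponding I-cache lines. The line_size fields hold
 * an encoded index, so the line size in bytes is (1 << (line_size + 2)) and
 * shifting an address right by (line_size + 2) yields its line number.
 * I-cache invalidation is performed on physical addresses because PSW.IT is
 * off in the debug exception.
 */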
1694 int nds32_cache_sync(struct target *target, target_addr_t address, uint32_t length)
1695 {
1696 struct aice_port_s *aice = target_to_aice(target);
1697 struct nds32 *nds32 = target_to_nds32(target);
1698 struct nds32_cache *dcache = &(nds32->memory.dcache);
1699 struct nds32_cache *icache = &(nds32->memory.icache);
1700 uint32_t dcache_line_size = NDS32_LINE_SIZE_TABLE[dcache->line_size];
1701 uint32_t icache_line_size = NDS32_LINE_SIZE_TABLE[icache->line_size];
1702 uint32_t cur_address;
1703 int result;
1704 uint32_t start_line, end_line;
1705 uint32_t cur_line;
1706
1707 if ((dcache->line_size != 0) && (dcache->enable == true)) {
1708 /* address / dcache_line_size */
1709 start_line = address >> (dcache->line_size + 2);
1710 /* (address + length - 1) / dcache_line_size */
1711 end_line = (address + length - 1) >> (dcache->line_size + 2);
1712
1713 for (cur_address = address, cur_line = start_line;
1714 cur_line <= end_line;
1715 cur_address += dcache_line_size, cur_line++) {
1716 /* D$ write back */
1717 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_WB, cur_address);
1718 if (result != ERROR_OK)
1719 return result;
1720
1721 /* D$ invalidate */
1722 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_INVAL, cur_address);
1723 if (result != ERROR_OK)
1724 return result;
1725 }
1726 }
1727
1728 if ((icache->line_size != 0) && (icache->enable == true)) {
1729 /* address / icache_line_size */
1730 start_line = address >> (icache->line_size + 2);
1731 /* (address + length - 1) / icache_line_size */
1732 end_line = (address + length - 1) >> (icache->line_size + 2);
1733
1734 for (cur_address = address, cur_line = start_line;
1735 cur_line <= end_line;
1736 cur_address += icache_line_size, cur_line++) {
1737 /* Because PSW.IT is turned off under the debug exception, the address MUST
1738  * be a physical address. L1I_VA_INVALIDATE uses PSW.IT to decide whether
1739  * to translate the address. */
1740 target_addr_t physical_addr;
1741 if (ERROR_FAIL == target->type->virt2phys(target, cur_address,
1742 &physical_addr))
1743 return ERROR_FAIL;
1744
1745 /* I$ invalidate */
1746 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1I_VA_INVAL, physical_addr);
1747 if (result != ERROR_OK)
1748 return result;
1749 }
1750 }
1751
1752 return ERROR_OK;
1753 }
1754
1755 uint32_t nds32_nextpc(struct nds32 *nds32, int current, uint32_t address)
1756 {
1757 if (!current)
1758 nds32_set_mapped_reg(nds32, PC, address);
1759 else
1760 nds32_get_mapped_reg(nds32, PC, &address);
1761
1762 return address;
1763 }
1764
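/**
 * Single-step one instruction. DSSIM (IR14 bit 31) is set or cleared
 * according to step_isr_enable, the core leaves debug state, performs one
 * hardware step through the AICE port, and re-enters debug state; DSSIM is
 * then restored. A step right after a syscall hit is skipped because
 * leave_debug_state() already steps over the syscall.
 */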
1765 int nds32_step(struct target *target, int current,
1766 target_addr_t address, int handle_breakpoints)
1767 {
1768 LOG_DEBUG("target->state: %s",
1769 target_state_name(target));
1770
1771 if (target->state != TARGET_HALTED) {
1772 LOG_WARNING("target was not halted");
1773 return ERROR_TARGET_NOT_HALTED;
1774 }
1775
1776 struct nds32 *nds32 = target_to_nds32(target);
1777
1778 address = nds32_nextpc(nds32, current, address);
1779
1780 LOG_DEBUG("STEP PC %08" TARGET_PRIxADDR "%s", address, !current ? "!" : "");
1781
1782 /** set DSSIM */
1783 uint32_t ir14_value;
1784 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1785 if (nds32->step_isr_enable)
1786 ir14_value |= (0x1 << 31);
1787 else
1788 ir14_value &= ~(0x1 << 31);
1789 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1790
1791 /* check hit_syscall before leave_debug_state() because
1792 * leave_debug_state() may clear hit_syscall flag */
1793 bool no_step = false;
1794 if (nds32->hit_syscall)
1795 /* step after hit_syscall should be ignored because
1796 * leave_debug_state will step implicitly to skip the
1797 * syscall */
1798 no_step = true;
1799
1800 /********* TODO: maybe create another function to handle this part */
1801 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
1802 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
1803
1804 if (no_step == false) {
1805 struct aice_port_s *aice = target_to_aice(target);
1806 if (ERROR_OK != aice_step(aice))
1807 return ERROR_FAIL;
1808 }
1809
1810 /* save state */
1811 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
1812 /********* TODO: maybe create another function to handle this part */
1813
1814 /* restore DSSIM */
1815 if (nds32->step_isr_enable) {
1816 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1817 ir14_value &= ~(0x1 << 31);
1818 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1819 }
1820
1821 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
1822
1823 return ERROR_OK;
1824 }
1825
1826 static int nds32_step_without_watchpoint(struct nds32 *nds32)
1827 {
1828 struct target *target = nds32->target;
1829
1830 if (target->state != TARGET_HALTED) {
1831 LOG_WARNING("target was not halted");
1832 return ERROR_TARGET_NOT_HALTED;
1833 }
1834
1835 /** set DSSIM */
1836 uint32_t ir14_value;
1837 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1838 if (nds32->step_isr_enable)
1839 ir14_value |= (0x1 << 31);
1840 else
1841 ir14_value &= ~(0x1 << 31);
1842 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1843
1844 /********* TODO: maybe create another function to handle this part */
1845 CHECK_RETVAL(nds32->leave_debug_state(nds32, false));
1846
1847 struct aice_port_s *aice = target_to_aice(target);
1848
1849 if (ERROR_OK != aice_step(aice))
1850 return ERROR_FAIL;
1851
1852 /* save state */
1853 CHECK_RETVAL(nds32->enter_debug_state(nds32, false));
1854 /********* TODO: maybe create another function to handle this part */
1855
1856 /* restore DSSIM */
1857 if (nds32->step_isr_enable) {
1858 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1859 ir14_value &= ~(0x1 << 31);
1860 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1861 }
1862
1863 return ERROR_OK;
1864 }
1865
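/**
 * Query the AICE adapter for the current core state and map it onto the
 * generic OpenOCD target states. A disconnected USB link or a detached
 * target is reported as a failure.
 */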
1866 int nds32_target_state(struct nds32 *nds32, enum target_state *state)
1867 {
1868 struct aice_port_s *aice = target_to_aice(nds32->target);
1869 enum aice_target_state_s nds32_state;
1870
1871 if (aice_state(aice, &nds32_state) != ERROR_OK)
1872 return ERROR_FAIL;
1873
1874 switch (nds32_state) {
1875 case AICE_DISCONNECT:
1876 LOG_INFO("USB is disconnected");
1877 return ERROR_FAIL;
1878 case AICE_TARGET_DETACH:
1879 LOG_INFO("Target is disconnected");
1880 return ERROR_FAIL;
1881 case AICE_TARGET_UNKNOWN:
1882 *state = TARGET_UNKNOWN;
1883 break;
1884 case AICE_TARGET_RUNNING:
1885 *state = TARGET_RUNNING;
1886 break;
1887 case AICE_TARGET_HALTED:
1888 *state = TARGET_HALTED;
1889 break;
1890 case AICE_TARGET_RESET:
1891 *state = TARGET_RESET;
1892 break;
1893 case AICE_TARGET_DEBUG_RUNNING:
1894 *state = TARGET_DEBUG_RUNNING;
1895 break;
1896 default:
1897 return ERROR_FAIL;
1898 }
1899
1900 return ERROR_OK;
1901 }
1902
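/**
 * Determine why the core entered debug state and record it in
 * target->debug_reason: breakpoints (including 'break 0x7FFF', used as an
 * exit marker), watchpoints (saving the watched address and, for precise
 * ones, stepping over the watched instruction), debug interrupts and
 * hardware single step.
 */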
1903 int nds32_examine_debug_reason(struct nds32 *nds32)
1904 {
1905 uint32_t reason;
1906 struct target *target = nds32->target;
1907
1908 if (nds32->hit_syscall == true) {
1909 LOG_DEBUG("Hit syscall breakpoint");
1910 target->debug_reason = DBG_REASON_BREAKPOINT;
1911 return ERROR_OK;
1912 }
1913
1914 nds32->get_debug_reason(nds32, &reason);
1915
1916 LOG_DEBUG("nds32 examines debug reason: %s", nds32_debug_type_name[reason]);
1917
1918 /* Examine debug reason */
1919 switch (reason) {
1920 case NDS32_DEBUG_BREAK:
1921 case NDS32_DEBUG_BREAK_16:
1922 case NDS32_DEBUG_INST_BREAK:
1923 {
1924 uint32_t value_pc;
1925 uint32_t opcode;
1926 struct nds32_instruction instruction;
1927
1928 nds32_get_mapped_reg(nds32, PC, &value_pc);
1929
1930 if (ERROR_OK != nds32_read_opcode(nds32, value_pc, &opcode))
1931 return ERROR_FAIL;
1932 if (ERROR_OK != nds32_evaluate_opcode(nds32, opcode, value_pc,
1933 &instruction))
1934 return ERROR_FAIL;
1935
1936 /* hit 'break 0x7FFF' */
1937 if ((instruction.info.opc_6 == 0x32) &&
1938 (instruction.info.sub_opc == 0xA) &&
1939 (instruction.info.imm == 0x7FFF)) {
1940 target->debug_reason = DBG_REASON_EXIT;
1941 } else
1942 target->debug_reason = DBG_REASON_BREAKPOINT;
1943 }
1944 break;
1945 case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_PRECISE:
1946 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_PRECISE:
1947 case NDS32_DEBUG_LOAD_STORE_GLOBAL_STOP: /* GLOBAL_STOP is precise exception */
1948 {
1949 int result;
1950
1951 result = nds32->get_watched_address(nds32,
1952 &(nds32->watched_address), reason);
1953 /* do a single step (without watchpoints) to skip the "watched" instruction */
1954 nds32_step_without_watchpoint(nds32);
1955
1956 /* the exception address was saved by get_watched_address() before the single step */
1957 if (ERROR_OK != result)
1958 return ERROR_FAIL;
1959
1960 target->debug_reason = DBG_REASON_WATCHPOINT;
1961 }
1962 break;
1963 case NDS32_DEBUG_DEBUG_INTERRUPT:
1964 target->debug_reason = DBG_REASON_DBGRQ;
1965 break;
1966 case NDS32_DEBUG_HARDWARE_SINGLE_STEP:
1967 target->debug_reason = DBG_REASON_SINGLESTEP;
1968 break;
1969 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_IMPRECISE:
1970 case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_NEXT_PRECISE:
1971 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_NEXT_PRECISE:
1972 if (ERROR_OK != nds32->get_watched_address(nds32,
1973 &(nds32->watched_address), reason))
1974 return ERROR_FAIL;
1975
1976 target->debug_reason = DBG_REASON_WATCHPOINT;
1977 break;
1978 default:
1979 target->debug_reason = DBG_REASON_UNDEFINED;
1980 break;
1981 }
1982
1983 return ERROR_OK;
1984 }
1985
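/**
 * Send the EDM passcode to unlock a secure MCU. The passcode string is cut
 * into chunks of up to eight hex digits and each chunk becomes a
 * "write_misc gen_port0 0x...;" command programmed into the EDM; for
 * example, a passcode of "deadbeef" would produce
 * "write_misc gen_port0 0xdeadbeef;". The resulting privilege level is read
 * back from EDMSW, and any user-configured EDM operations (gen_port0 or
 * gen_port1 writes) are programmed afterwards.
 */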
1986 int nds32_login(struct nds32 *nds32)
1987 {
1988 struct target *target = nds32->target;
1989 struct aice_port_s *aice = target_to_aice(target);
1990 uint32_t passcode_length;
1991 char command_sequence[129];
1992 char command_str[33];
1993 char code_str[9];
1994 uint32_t copy_length;
1995 uint32_t code;
1996 uint32_t i;
1997
1998 LOG_DEBUG("nds32_login");
1999
2000 if (nds32->edm_passcode != NULL) {
2001 /* convert EDM passcode to command sequences */
2002 passcode_length = strlen(nds32->edm_passcode);
2003 command_sequence[0] = '\0';
2004 for (i = 0; i < passcode_length; i += 8) {
2005 if (passcode_length - i < 8)
2006 copy_length = passcode_length - i;
2007 else
2008 copy_length = 8;
2009
2010 strncpy(code_str, nds32->edm_passcode + i, copy_length);
2011 code_str[copy_length] = '\0';
2012 code = strtoul(code_str, NULL, 16);
2013
2014 sprintf(command_str, "write_misc gen_port0 0x%" PRIx32 ";", code);
2015 strcat(command_sequence, command_str);
2016 }
2017
2018 if (ERROR_OK != aice_program_edm(aice, command_sequence))
2019 return ERROR_FAIL;
2020
2021 /* get current privilege level */
2022 uint32_t value_edmsw;
2023 aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &value_edmsw);
2024 nds32->privilege_level = (value_edmsw >> 16) & 0x3;
2025 LOG_INFO("Current privilege level: %d", nds32->privilege_level);
2026 }
2027
2028 if (nds32_edm_ops_num > 0) {
2029 const char *reg_name;
2030 for (i = 0 ; i < nds32_edm_ops_num ; i++) {
2031 code = nds32_edm_ops[i].value;
2032 if (nds32_edm_ops[i].reg_no == 6)
2033 reg_name = "gen_port0";
2034 else if (nds32_edm_ops[i].reg_no == 7)
2035 reg_name = "gen_port1";
2036 else
2037 return ERROR_FAIL;
2038
2039 sprintf(command_str, "write_misc %s 0x%" PRIx32 ";", reg_name, code);
2040 if (ERROR_OK != aice_program_edm(aice, command_str))
2041 return ERROR_FAIL;
2042 }
2043 }
2044
2045 return ERROR_OK;
2046 }
2047
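/**
 * Halt the target. If the core is not already halted a halt request is
 * issued through the AICE port, then debug state is entered and the
 * HALTED event is delivered.
 */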
2048 int nds32_halt(struct target *target)
2049 {
2050 struct nds32 *nds32 = target_to_nds32(target);
2051 struct aice_port_s *aice = target_to_aice(target);
2052 enum target_state state;
2053
2054 LOG_DEBUG("target->state: %s",
2055 target_state_name(target));
2056
2057 if (target->state == TARGET_HALTED) {
2058 LOG_DEBUG("target was already halted");
2059 return ERROR_OK;
2060 }
2061
2062 if (nds32_target_state(nds32, &state) != ERROR_OK)
2063 return ERROR_FAIL;
2064
2065 if (TARGET_HALTED != state)
2066 /* TODO: if state == TARGET_HALTED, check ETYPE is DBGI or not */
2067 if (ERROR_OK != aice_halt(aice))
2068 return ERROR_FAIL;
2069
2070 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
2071
2072 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
2073
2074 return ERROR_OK;
2075 }
2076
2077 /* poll current target status */
2078 int nds32_poll(struct target *target)
2079 {
2080 struct nds32 *nds32 = target_to_nds32(target);
2081 enum target_state state;
2082
2083 if (nds32_target_state(nds32, &state) != ERROR_OK)
2084 return ERROR_FAIL;
2085
2086 if (state == TARGET_HALTED) {
2087 if (target->state != TARGET_HALTED) {
2088 /* if false_hit, continue free_run */
2089 if (ERROR_OK != nds32->enter_debug_state(nds32, true)) {
2090 struct aice_port_s *aice = target_to_aice(target);
2091 aice_run(aice);
2092 return ERROR_OK;
2093 }
2094
2095 LOG_DEBUG("Change target state to TARGET_HALTED.");
2096
2097 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
2098 }
2099 } else if (state == TARGET_RESET) {
2100 if (target->state == TARGET_HALTED) {
2101 /* similar to assert srst */
2102 register_cache_invalidate(nds32->core_cache);
2103 target->state = TARGET_RESET;
2104
2105 /* TODO: deassert srst */
2106 } else if (target->state == TARGET_RUNNING) {
2107 /* reset as running */
2108 LOG_WARNING("<-- TARGET WARNING! The debug target has been reset. -->");
2109 }
2110 } else {
2111 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
2112 LOG_DEBUG("Change target state to TARGET_RUNNING.");
2113 target->state = TARGET_RUNNING;
2114 target->debug_reason = DBG_REASON_NOTHALTED;
2115 }
2116 }
2117
2118 return ERROR_OK;
2119 }
2120
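/**
 * Resume execution, either at the current PC or at the given address.
 * Hardware single step is disabled unless the maximum interrupt level has
 * been reached, debug state is left, and the core is set running again;
 * when a virtual-hosting ctrl-c is pending, aice_run() is skipped and the
 * flag is cleared.
 */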
2121 int nds32_resume(struct target *target, int current,
2122 target_addr_t address, int handle_breakpoints, int debug_execution)
2123 {
2124 LOG_DEBUG("current %d address %08" TARGET_PRIxADDR
2125 " handle_breakpoints %d"
2126 " debug_execution %d",
2127 current, address, handle_breakpoints, debug_execution);
2128
2129 struct nds32 *nds32 = target_to_nds32(target);
2130
2131 if (target->state != TARGET_HALTED) {
2132 LOG_ERROR("Target not halted");
2133 return ERROR_TARGET_NOT_HALTED;
2134 }
2135
2136 address = nds32_nextpc(nds32, current, address);
2137
2138 LOG_DEBUG("RESUME PC %08" TARGET_PRIxADDR "%s", address, !current ? "!" : "");
2139
2140 if (!debug_execution)
2141 target_free_all_working_areas(target);
2142
2143 /* Disable HSS to prevent users from misusing it */
2144 if (nds32_reach_max_interrupt_level(nds32) == false) {
2145 uint32_t value_ir0;
2146 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
2147 value_ir0 &= ~(0x1 << 11);
2148 nds32_set_mapped_reg(nds32, IR0, value_ir0);
2149 }
2150
2151 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
2152 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
2153
2154 if (nds32->virtual_hosting_ctrl_c == false) {
2155 struct aice_port_s *aice = target_to_aice(target);
2156 aice_run(aice);
2157 } else
2158 nds32->virtual_hosting_ctrl_c = false;
2159
2160 target->debug_reason = DBG_REASON_NOTHALTED;
2161 if (!debug_execution)
2162 target->state = TARGET_RUNNING;
2163 else
2164 target->state = TARGET_DEBUG_RUNNING;
2165
2166 LOG_DEBUG("target->state: %s",
2167 target_state_name(target));
2168
2169 return ERROR_OK;
2170 }
2171
2172 static int nds32_soft_reset_halt(struct target *target)
2173 {
2174 /* TODO: test it */
2175 struct nds32 *nds32 = target_to_nds32(target);
2176 struct aice_port_s *aice = target_to_aice(target);
2177
2178 aice_assert_srst(aice, AICE_SRST);
2179
2180 /* halt core and set pc to 0x0 */
2181 int retval = target_halt(target);
2182 if (retval != ERROR_OK)
2183 return retval;
2184
2185 /* start fetching from IVB */
2186 uint32_t value_ir3;
2187 nds32_get_mapped_reg(nds32, IR3, &value_ir3);
2188 nds32_set_mapped_reg(nds32, PC, value_ir3 & 0xFFFF0000);
2189
2190 return ERROR_OK;
2191 }
2192
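/**
 * Assert reset on the target. Depending on the reset-halt setting, the EDM
 * version and one known problematic CPU revision, either a soft reset-halt
 * or an AICE reset-and-hold is used; otherwise a plain srst is asserted.
 * Afterwards the EDM passcode is sent again and the register cache is
 * invalidated.
 */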
2193 int nds32_assert_reset(struct target *target)
2194 {
2195 struct nds32 *nds32 = target_to_nds32(target);
2196 struct aice_port_s *aice = target_to_aice(target);
2197 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
2198
2199 /* TODO: apply hw reset signal even in the not-examined state */
2200 if (!(target_was_examined(target))) {
2201 LOG_WARNING("Reset is not asserted because the target is not examined.");
2202 LOG_WARNING("Use a reset button or power cycle the target.");
2203 return ERROR_TARGET_NOT_EXAMINED;
2204 }
2205
2206 if (target->reset_halt) {
2207 if ((nds32->soft_reset_halt)
2208 || (nds32->edm.version < 0x51)
2209 || ((nds32->edm.version == 0x51)
2210 && (cpu_version->revision == 0x1C)
2211 && (cpu_version->cpu_id_family == 0xC)
2212 && (cpu_version->cpu_id_version == 0x0)))
2213 nds32_soft_reset_halt(target);
2214 else
2215 aice_assert_srst(aice, AICE_RESET_HOLD);
2216 } else {
2217 aice_assert_srst(aice, AICE_SRST);
2218 alive_sleep(nds32->boot_time);
2219 }
2220
2221 /* set passcode for secure MCU after core reset */
2222 nds32_login(nds32);
2223
2224 /* registers are now invalid */
2225 register_cache_invalidate(nds32->core_cache);
2226
2227 target->state = TARGET_RESET;
2228
2229 return ERROR_OK;
2230 }
2231
2232 static int nds32_gdb_attach(struct nds32 *nds32)
2233 {
2234 LOG_DEBUG("nds32_gdb_attach, target coreid: %" PRId32, nds32->target->coreid);
2235
2236 if (nds32->attached == false) {
2237
2238 if (nds32->keep_target_edm_ctl) {
2239 /* backup target EDM_CTL */
2240 struct aice_port_s *aice = target_to_aice(nds32->target);
2241 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &nds32->backup_edm_ctl);
2242 }
2243
2244 target_halt(nds32->target);
2245
2246 nds32->attached = true;
2247 }
2248
2249 return ERROR_OK;
2250 }
2251
2252 static int nds32_gdb_detach(struct nds32 *nds32)
2253 {
2254 LOG_DEBUG("nds32_gdb_detach");
2255 bool backup_virtual_hosting_setting;
2256
2257 if (nds32->attached) {
2258
2259 backup_virtual_hosting_setting = nds32->virtual_hosting;
2260 /* turn off virtual hosting before the resume issued for gdb detach */
2261 nds32->virtual_hosting = false;
2262 target_resume(nds32->target, 1, 0, 0, 0);
2263 nds32->virtual_hosting = backup_virtual_hosting_setting;
2264
2265 if (nds32->keep_target_edm_ctl) {
2266 /* restore target EDM_CTL */
2267 struct aice_port_s *aice = target_to_aice(nds32->target);
2268 aice_write_debug_reg(aice, NDS_EDM_SR_EDM_CTL, nds32->backup_edm_ctl);
2269 }
2270
2271 nds32->attached = false;
2272 }
2273
2274 return ERROR_OK;
2275 }
2276
2277 static int nds32_callback_event_handler(struct target *target,
2278 enum target_event event, void *priv)
2279 {
2280 int retval = ERROR_OK;
2281 int target_number = *(int *)priv;
2282
2283 if (target_number != target->target_number)
2284 return ERROR_OK;
2285
2286 struct nds32 *nds32 = target_to_nds32(target);
2287
2288 switch (event) {
2289 case TARGET_EVENT_GDB_ATTACH:
2290 retval = nds32_gdb_attach(nds32);
2291 break;
2292 case TARGET_EVENT_GDB_DETACH:
2293 retval = nds32_gdb_detach(nds32);
2294 break;
2295 default:
2296 break;
2297 }
2298
2299 return retval;
2300 }
2301
2302 int nds32_init(struct nds32 *nds32)
2303 {
2304 /* Initialize anything we can set up without talking to the target */
2305 nds32->memory.access_channel = NDS_MEMORY_ACC_CPU;
2306
2307 /* register event callback */
2308 target_register_event_callback(nds32_callback_event_handler,
2309 &(nds32->target->target_number));
2310
2311 return ERROR_OK;
2312 }
2313
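/**
 * Fill a gdb_fileio_info structure from a virtual-hosting syscall hit: the
 * syscall number is taken from IR6 and its arguments from R0-R2. For calls
 * that pass path or command strings, the string is read from target memory
 * so that its length can be reported as well.
 */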
2314 int nds32_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
2315 {
2316 /* fill syscall parameters into the file-I/O info */
2317 if (NULL == fileio_info) {
2318 LOG_ERROR("Target has no initialized file-I/O data structure");
2319 return ERROR_FAIL;
2320 }
2321
2322 struct nds32 *nds32 = target_to_nds32(target);
2323 uint32_t value_ir6;
2324 uint32_t syscall_id;
2325
2326 if (nds32->hit_syscall == false)
2327 return ERROR_FAIL;
2328
2329 nds32_get_mapped_reg(nds32, IR6, &value_ir6);
2330 syscall_id = (value_ir6 >> 16) & 0x7FFF;
2331 nds32->active_syscall_id = syscall_id;
2332
2333 LOG_DEBUG("hit syscall ID: 0x%" PRIx32, syscall_id);
2334
2335 /* free previous identifier storage */
2336 free(fileio_info->identifier);
2337 fileio_info->identifier = NULL;
2338
2339 uint32_t reg_r0, reg_r1, reg_r2;
2340 nds32_get_mapped_reg(nds32, R0, &reg_r0);
2341 nds32_get_mapped_reg(nds32, R1, &reg_r1);
2342 nds32_get_mapped_reg(nds32, R2, &reg_r2);
2343
2344 switch (syscall_id) {
2345 case NDS32_SYSCALL_EXIT:
2346 fileio_info->identifier = malloc(5);
2347 sprintf(fileio_info->identifier, "exit");
2348 fileio_info->param_1 = reg_r0;
2349 break;
2350 case NDS32_SYSCALL_OPEN:
2351 {
2352 uint8_t filename[256];
2353 fileio_info->identifier = malloc(5);
2354 sprintf(fileio_info->identifier, "open");
2355 fileio_info->param_1 = reg_r0;
2356 /* reserve fileio_info->param_2 for length of path */
2357 fileio_info->param_3 = reg_r1;
2358 fileio_info->param_4 = reg_r2;
2359
2360 target->type->read_buffer(target, reg_r0, 256, filename);
2361 fileio_info->param_2 = strlen((char *)filename);
2362 }
2363 break;
2364 case NDS32_SYSCALL_CLOSE:
2365 fileio_info->identifier = malloc(6);
2366 sprintf(fileio_info->identifier, "close");
2367 fileio_info->param_1 = reg_r0;
2368 break;
2369 case NDS32_SYSCALL_READ:
2370 fileio_info->identifier = malloc(5);
2371 sprintf(fileio_info->identifier, "read");
2372 fileio_info->param_1 = reg_r0;
2373 fileio_info->param_2 = reg_r1;
2374 fileio_info->param_3 = reg_r2;
2375 break;
2376 case NDS32_SYSCALL_WRITE:
2377 fileio_info->identifier = malloc(6);
2378 sprintf(fileio_info->identifier, "write");
2379 fileio_info->param_1 = reg_r0;
2380 fileio_info->param_2 = reg_r1;
2381 fileio_info->param_3 = reg_r2;
2382 break;
2383 case NDS32_SYSCALL_LSEEK:
2384 fileio_info->identifier = malloc(6);
2385 sprintf(fileio_info->identifier, "lseek");
2386 fileio_info->param_1 = reg_r0;
2387 fileio_info->param_2 = reg_r1;
2388 fileio_info->param_3 = reg_r2;
2389 break;
2390 case NDS32_SYSCALL_UNLINK:
2391 {
2392 uint8_t filename[256];
2393 fileio_info->identifier = malloc(7);
2394 sprintf(fileio_info->identifier, "unlink");
2395 fileio_info->param_1 = reg_r0;
2396 /* reserve fileio_info->param_2 for length of path */
2397
2398 target->type->read_buffer(target, reg_r0, 256, filename);
2399 fileio_info->param_2 = strlen((char *)filename);
2400 }
2401 break;
2402 case NDS32_SYSCALL_RENAME:
2403 {
2404 uint8_t filename[256];
2405 fileio_info->identifier = malloc(7);
2406 sprintf(fileio_info->identifier, "rename");
2407 fileio_info->param_1 = reg_r0;
2408 /* reserve fileio_info->param_2 for length of old path */
2409 fileio_info->param_3 = reg_r1;
2410 /* reserve fileio_info->param_4 for length of new path */
2411
2412 target->type->read_buffer(target, reg_r0, 256, filename);
2413 fileio_info->param_2 = strlen((char *)filename);
2414
2415 target->type->read_buffer(target, reg_r1, 256, filename);
2416 fileio_info->param_4 = strlen((char *)filename);
2417 }
2418 break;
2419 case NDS32_SYSCALL_FSTAT:
2420 fileio_info->identifier = malloc(6);
2421 sprintf(fileio_info->identifier, "fstat");
2422 fileio_info->param_1 = reg_r0;
2423 fileio_info->param_2 = reg_r1;
2424 break;
2425 case NDS32_SYSCALL_STAT:
2426 {
2427 uint8_t filename[256];
2428 fileio_info->identifier = malloc(5);
2429 sprintf(fileio_info->identifier, "stat");
2430 fileio_info->param_1 = reg_r0;
2431 /* reserve fileio_info->param_2 for length of path */
2432 fileio_info->param_3 = reg_r1;
2433
2434 target->type->read_buffer(target, reg_r0, 256, filename);
2435 fileio_info->param_2 = strlen((char *)filename) + 1;
2436 }
2437 break;
2438 case NDS32_SYSCALL_GETTIMEOFDAY:
2439 fileio_info->identifier = malloc(13);
2440 sprintf(fileio_info->identifier, "gettimeofday");
2441 fileio_info->param_1 = reg_r0;
2442 fileio_info->param_2 = reg_r1;
2443 break;
2444 case NDS32_SYSCALL_ISATTY:
2445 fileio_info->identifier = malloc(7);
2446 sprintf(fileio_info->identifier, "isatty");
2447 fileio_info->param_1 = reg_r0;
2448 break;
2449 case NDS32_SYSCALL_SYSTEM:
2450 {
2451 uint8_t command[256];
2452 fileio_info->identifier = malloc(7);
2453 sprintf(fileio_info->identifier, "system");
2454 fileio_info->param_1 = reg_r0;
2455 /* reserve fileio_info->param_2 for length of command */
2456
2457 target->type->read_buffer(target, reg_r0, 256, command);
2458 fileio_info->param_2 = strlen((char *)command);
2459 }
2460 break;
2461 case NDS32_SYSCALL_ERRNO:
2462 fileio_info->identifier = malloc(6);
2463 sprintf(fileio_info->identifier, "errno");
2464 nds32_set_mapped_reg(nds32, R0, nds32->virtual_hosting_errno);
2465 break;
2466 default:
2467 fileio_info->identifier = malloc(8);
2468 sprintf(fileio_info->identifier, "unknown");
2469 break;
2470 }
2471
2472 return ERROR_OK;
2473 }
2474
2475 int nds32_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
2476 {
2477 LOG_DEBUG("syscall return code: 0x%x, errno: 0x%x , ctrl_c: %s",
2478 retcode, fileio_errno, ctrl_c ? "true" : "false");
2479
2480 struct nds32 *nds32 = target_to_nds32(target);
2481
2482 nds32_set_mapped_reg(nds32, R0, (uint32_t)retcode);
2483
2484 nds32->virtual_hosting_errno = fileio_errno;
2485 nds32->virtual_hosting_ctrl_c = ctrl_c;
2486 nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;
2487
2488 return ERROR_OK;
2489 }
2490
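/**
 * Sample the program counter for profiling. The PC is sampled roughly every
 * 10 ms through the AICE port, so up to (seconds * 100) samples are taken,
 * capped at max_num_samples. The register cache is invalidated afterwards.
 */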
2491 int nds32_profiling(struct target *target, uint32_t *samples,
2492 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2493 {
2494 /* sample $PC every 10 milliseconds */
2495 uint32_t iteration = seconds * 100;
2496 struct aice_port_s *aice = target_to_aice(target);
2497 struct nds32 *nds32 = target_to_nds32(target);
2498
2499 /* REVISIT: can nds32 profile without halting? */
2500 if (target->state != TARGET_HALTED) {
2501 LOG_WARNING("target %s is not halted (profiling)", target->cmd_name);
2502 return ERROR_TARGET_NOT_HALTED;
2503 }
2504
2505 if (max_num_samples < iteration)
2506 iteration = max_num_samples;
2507
2508 int pc_regnum = nds32->register_map(nds32, PC);
2509 aice_profiling(aice, 10, iteration, pc_regnum, samples, num_samples);
2510
2511 register_cache_invalidate(nds32->core_cache);
2512
2513 return ERROR_OK;
2514 }
2515
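/**
 * Write data returned by a GDB file-I/O request into target memory. For
 * fstat/stat and gettimeofday the buffer is converted from the big-endian
 * GDB layout of 'struct stat' / 'struct timeval' into the target's
 * little-endian layout before it is written; all other data is written
 * through unchanged.
 */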
2516 int nds32_gdb_fileio_write_memory(struct nds32 *nds32, uint32_t address,
2517 uint32_t size, const uint8_t *buffer)
2518 {
2519 if ((NDS32_SYSCALL_FSTAT == nds32->active_syscall_id) ||
2520 (NDS32_SYSCALL_STAT == nds32->active_syscall_id)) {
2521 /* If doing GDB file-I/O, target should convert 'struct stat'
2522 * from gdb-format to target-format */
2523 uint8_t stat_buffer[NDS32_STRUCT_STAT_SIZE];
2524 /* st_dev 2 */
2525 stat_buffer[0] = buffer[3];
2526 stat_buffer[1] = buffer[2];
2527 /* st_ino 2 */
2528 stat_buffer[2] = buffer[7];
2529 stat_buffer[3] = buffer[6];
2530 /* st_mode 4 */
2531 stat_buffer[4] = buffer[11];
2532 stat_buffer[5] = buffer[10];
2533 stat_buffer[6] = buffer[9];
2534 stat_buffer[7] = buffer[8];
2535 /* st_nlink 2 */
2536 stat_buffer[8] = buffer[15];
2537 stat_buffer[9] = buffer[14];
2538 /* st_uid 2 */
2539 stat_buffer[10] = buffer[19];
2540 stat_buffer[11] = buffer[18];
2541 /* st_gid 2 */
2542 stat_buffer[12] = buffer[23];
2543 stat_buffer[13] = buffer[22];
2544 /* st_rdev 2 */
2545 stat_buffer[14] = buffer[27];
2546 stat_buffer[15] = buffer[26];
2547 /* st_size 4 */
2548 stat_buffer[16] = buffer[35];
2549 stat_buffer[17] = buffer[34];
2550 stat_buffer[18] = buffer[33];
2551 stat_buffer[19] = buffer[32];
2552 /* st_atime 4 */
2553 stat_buffer[20] = buffer[55];
2554 stat_buffer[21] = buffer[54];
2555 stat_buffer[22] = buffer[53];
2556 stat_buffer[23] = buffer[52];
2557 /* st_spare1 4 */
2558 stat_buffer[24] = 0;
2559 stat_buffer[25] = 0;
2560 stat_buffer[26] = 0;
2561 stat_buffer[27] = 0;
2562 /* st_mtime 4 */
2563 stat_buffer[28] = buffer[59];
2564 stat_buffer[29] = buffer[58];
2565 stat_buffer[30] = buffer[57];
2566 stat_buffer[31] = buffer[56];
2567 /* st_spare2 4 */
2568 stat_buffer[32] = 0;
2569 stat_buffer[33] = 0;
2570 stat_buffer[34] = 0;
2571 stat_buffer[35] = 0;
2572 /* st_ctime 4 */
2573 stat_buffer[36] = buffer[63];
2574 stat_buffer[37] = buffer[62];
2575 stat_buffer[38] = buffer[61];
2576 stat_buffer[39] = buffer[60];
2577 /* st_spare3 4 */
2578 stat_buffer[40] = 0;
2579 stat_buffer[41] = 0;
2580 stat_buffer[42] = 0;
2581 stat_buffer[43] = 0;
2582 /* st_blksize 4 */
2583 stat_buffer[44] = buffer[43];
2584 stat_buffer[45] = buffer[42];
2585 stat_buffer[46] = buffer[41];
2586 stat_buffer[47] = buffer[40];
2587 /* st_blocks 4 */
2588 stat_buffer[48] = buffer[51];
2589 stat_buffer[49] = buffer[50];
2590 stat_buffer[50] = buffer[49];
2591 stat_buffer[51] = buffer[48];
2592 /* st_spare4 8 */
2593 stat_buffer[52] = 0;
2594 stat_buffer[53] = 0;
2595 stat_buffer[54] = 0;
2596 stat_buffer[55] = 0;
2597 stat_buffer[56] = 0;
2598 stat_buffer[57] = 0;
2599 stat_buffer[58] = 0;
2600 stat_buffer[59] = 0;
2601
2602 return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_STAT_SIZE, stat_buffer);
2603 } else if (NDS32_SYSCALL_GETTIMEOFDAY == nds32->active_syscall_id) {
2604 /* If doing GDB file-I/O, target should convert 'struct timeval'
2605 * from gdb-format to target-format */
2606 uint8_t timeval_buffer[NDS32_STRUCT_TIMEVAL_SIZE];
2607 timeval_buffer[0] = buffer[3];
2608 timeval_buffer[1] = buffer[2];
2609 timeval_buffer[2] = buffer[1];
2610 timeval_buffer[3] = buffer[0];
2611 timeval_buffer[4] = buffer[11];
2612 timeval_buffer[5] = buffer[10];
2613 timeval_buffer[6] = buffer[9];
2614 timeval_buffer[7] = buffer[8];
2615
2616 return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_TIMEVAL_SIZE, timeval_buffer);
2617 }
2618
2619 return nds32_write_buffer(nds32->target, address, size, buffer);
2620 }
2621
2622 int nds32_reset_halt(struct nds32 *nds32)
2623 {
2624 LOG_INFO("reset halt as init");
2625
2626 struct aice_port_s *aice = target_to_aice(nds32->target);
2627 aice_assert_srst(aice, AICE_RESET_HOLD);
2628
2629 return ERROR_OK;
2630 }
