nds32: Use the correct method to access registers
[openocd.git] / src / target / nds32.c
1 /***************************************************************************
2 * Copyright (C) 2013 Andes Technology *
3 * Hsiangkai Wang <hkwang@andestech.com> *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
19 ***************************************************************************/
20
21 #ifdef HAVE_CONFIG_H
22 #include "config.h"
23 #endif
24
25 #include <helper/log.h>
26 #include <helper/binarybuffer.h>
27 #include "nds32.h"
28 #include "nds32_aice.h"
29 #include "nds32_tlb.h"
30 #include "nds32_disassembler.h"
31
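/* Opcodes used to plant software breakpoints. The stored values are
 * byte-swapped relative to the big-endian instruction encodings shown in
 * the trailing comments (BREAK16 = 0xEA00, BREAK = 0x6400000A). */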
32 const int NDS32_BREAK_16 = 0x00EA; /* 0xEA00 */
33 const int NDS32_BREAK_32 = 0x0A000064; /* 0x6400000A */
34
35 struct nds32_edm_operation nds32_edm_ops[NDS32_EDM_OPERATION_MAX_NUM];
36 uint32_t nds32_edm_ops_num;
37
38 const char *nds32_debug_type_name[11] = {
39 "SOFTWARE BREAK",
40 "SOFTWARE BREAK_16",
41 "HARDWARE BREAKPOINT",
42 "DATA ADDR WATCHPOINT PRECISE",
43 "DATA VALUE WATCHPOINT PRECISE",
44 "DATA VALUE WATCHPOINT IMPRECISE",
45 "DEBUG INTERRUPT",
46 "HARDWARE SINGLE STEP",
47 "DATA ADDR WATCHPOINT NEXT PRECISE",
48 "DATA VALUE WATCHPOINT NEXT PRECISE",
49 "LOAD STORE GLOBAL STOP",
50 };
51
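/* ILM/DLM sizes in bytes, indexed by the 4-bit size field taken from
 * $mr6/$mr7 bits [4:1] (see nds32_config()). */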
52 static const int NDS32_LM_SIZE_TABLE[16] = {
53 4 * 1024,
54 8 * 1024,
55 16 * 1024,
56 32 * 1024,
57 64 * 1024,
58 128 * 1024,
59 256 * 1024,
60 512 * 1024,
61 1024 * 1024,
62 1 * 1024,
63 2 * 1024,
64 };
65
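/* Cache line sizes in bytes, indexed by the 3-bit line-size field of
 * $cr1/$cr2; 0 means no cache. Entry n equals 1 << (n + 2) for n >= 1,
 * which nds32_cache_sync() exploits by shifting addresses by line_size + 2. */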
66 static const int NDS32_LINE_SIZE_TABLE[6] = {
67 0,
68 8,
69 16,
70 32,
71 64,
72 128,
73 };
74
75 static int nds32_get_core_reg(struct reg *reg)
76 {
77 int retval;
78 struct nds32_reg *reg_arch_info = reg->arch_info;
79 struct target *target = reg_arch_info->target;
80 struct nds32 *nds32 = target_to_nds32(target);
81 struct aice_port_s *aice = target_to_aice(target);
82
83 if (target->state != TARGET_HALTED) {
84 LOG_ERROR("Target not halted");
85 return ERROR_TARGET_NOT_HALTED;
86 }
87
88 if (reg->valid) {
89 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
90 LOG_DEBUG("reading register(cached) %" PRIi32 "(%s), value: 0x%8.8" PRIx32,
91 reg_arch_info->num, reg->name, val);
92 return ERROR_OK;
93 }
94
95 int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);
96
97 if (reg_arch_info->enable == false) {
98 buf_set_u32(reg_arch_info->value, 0, 32, NDS32_REGISTER_DISABLE);
99 retval = ERROR_FAIL;
100 } else {
101 uint32_t val = 0;
102 if ((nds32->fpu_enable == false)
103 && (NDS32_REG_TYPE_FPU == nds32_reg_type(mapped_regnum))) {
104 retval = ERROR_OK;
105 } else if ((nds32->audio_enable == false)
106 && (NDS32_REG_TYPE_AUMR == nds32_reg_type(mapped_regnum))) {
107 retval = ERROR_OK;
108 } else {
109 retval = aice_read_register(aice, mapped_regnum, &val);
110 }
111 buf_set_u32(reg_arch_info->value, 0, 32, val);
112
113 LOG_DEBUG("reading register %" PRIi32 "(%s), value: 0x%8.8" PRIx32,
114 reg_arch_info->num, reg->name, val);
115 }
116
117 if (retval == ERROR_OK) {
118 reg->valid = true;
119 reg->dirty = false;
120 }
121
122 return retval;
123 }
124
125 static int nds32_get_core_reg_64(struct reg *reg)
126 {
127 int retval;
128 struct nds32_reg *reg_arch_info = reg->arch_info;
129 struct target *target = reg_arch_info->target;
130 struct nds32 *nds32 = target_to_nds32(target);
131 struct aice_port_s *aice = target_to_aice(target);
132
133 if (target->state != TARGET_HALTED) {
134 LOG_ERROR("Target not halted");
135 return ERROR_TARGET_NOT_HALTED;
136 }
137
138 if (reg->valid)
139 return ERROR_OK;
140
141 if (reg_arch_info->enable == false) {
142 buf_set_u64(reg_arch_info->value, 0, 64, NDS32_REGISTER_DISABLE);
143 retval = ERROR_FAIL;
144 } else {
145 uint64_t val = 0;
146 if ((nds32->fpu_enable == false)
147 && ((FD0 <= reg_arch_info->num) && (reg_arch_info->num <= FD31))) {
148 retval = ERROR_OK;
149 } else {
150 retval = aice_read_reg_64(aice, reg_arch_info->num, &val);
151 }
152 buf_set_u64(reg_arch_info->value, 0, 64, val);
153 }
154
155 if (retval == ERROR_OK) {
156 reg->valid = true;
157 reg->dirty = false;
158 }
159
160 return retval;
161 }
162
163 static int nds32_update_psw(struct nds32 *nds32)
164 {
165 uint32_t value_ir0;
166 struct aice_port_s *aice = target_to_aice(nds32->target);
167
168 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
169
170 /* Save data memory endian */
171 if ((value_ir0 >> 5) & 0x1) {
172 nds32->data_endian = TARGET_BIG_ENDIAN;
173 aice_set_data_endian(aice, AICE_BIG_ENDIAN);
174 } else {
175 nds32->data_endian = TARGET_LITTLE_ENDIAN;
176 aice_set_data_endian(aice, AICE_LITTLE_ENDIAN);
177 }
178
179 /* Save translation status */
180 nds32->memory.address_translation = ((value_ir0 >> 7) & 0x1) ? true : false;
181
182 return ERROR_OK;
183 }
184
185 static int nds32_update_mmu_info(struct nds32 *nds32)
186 {
187 uint32_t value;
188
189 /* Update MMU control status */
190 nds32_get_mapped_reg(nds32, MR0, &value);
191 nds32->mmu_config.default_min_page_size = value & 0x1;
192 nds32->mmu_config.multiple_page_size_in_use = (value >> 10) & 0x1;
193
194 return ERROR_OK;
195 }
196
197 static int nds32_update_cache_info(struct nds32 *nds32)
198 {
199 uint32_t value;
200
201 if (ERROR_OK == nds32_get_mapped_reg(nds32, MR8, &value)) {
202 if (value & 0x1)
203 nds32->memory.icache.enable = true;
204 else
205 nds32->memory.icache.enable = false;
206
207 if (value & 0x2)
208 nds32->memory.dcache.enable = true;
209 else
210 nds32->memory.dcache.enable = false;
211 } else {
212 nds32->memory.icache.enable = false;
213 nds32->memory.dcache.enable = false;
214 }
215
216 return ERROR_OK;
217 }
218
219 static int nds32_update_lm_info(struct nds32 *nds32)
220 {
221 struct nds32_memory *memory = &(nds32->memory);
222 uint32_t value_mr6;
223 uint32_t value_mr7;
224
225 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
226 if (value_mr6 & 0x1)
227 memory->ilm_enable = true;
228 else
229 memory->ilm_enable = false;
230
231 if (memory->ilm_align_ver == 0) { /* 1MB aligned */
232 memory->ilm_start = value_mr6 & 0xFFF00000;
233 memory->ilm_end = memory->ilm_start + memory->ilm_size;
234 } else if (memory->ilm_align_ver == 1) { /* aligned to local memory size */
235 memory->ilm_start = value_mr6 & 0xFFFFFC00;
236 memory->ilm_end = memory->ilm_start + memory->ilm_size;
237 } else {
238 memory->ilm_start = -1;
239 memory->ilm_end = -1;
240 }
241
242 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
243 if (value_mr7 & 0x1)
244 memory->dlm_enable = true;
245 else
246 memory->dlm_enable = false;
247
248 if (memory->dlm_align_ver == 0) { /* 1MB aligned */
249 memory->dlm_start = value_mr7 & 0xFFF00000;
250 memory->dlm_end = memory->dlm_start + memory->dlm_size;
251 } else if (memory->dlm_align_ver == 1) { /* aligned to local memory size */
252 memory->dlm_start = value_mr7 & 0xFFFFFC00;
253 memory->dlm_end = memory->dlm_start + memory->dlm_size;
254 } else {
255 memory->dlm_start = -1;
256 memory->dlm_end = -1;
257 }
258
259 return ERROR_OK;
260 }
261
262 /**
 263  * Accessing FPU/audio registers while the corresponding extension is
 264  * disabled raises an exception. Therefore, check whether the FPU/audio
 265  * extension is enabled whenever the target halts. While an extension is
 266  * disabled, OpenOCD returns a fake value of 0 for its registers instead
 267  * of accessing them through DIM.
268 */
269 static int nds32_check_extension(struct nds32 *nds32)
270 {
271 uint32_t value;
272
273 nds32_get_mapped_reg(nds32, FUCPR, &value);
274 if (value == NDS32_REGISTER_DISABLE) {
275 nds32->fpu_enable = false;
276 nds32->audio_enable = false;
277 return ERROR_OK;
278 }
279
280 if (value & 0x1)
281 nds32->fpu_enable = true;
282 else
283 nds32->fpu_enable = false;
284
285 if (value & 0x80000000)
286 nds32->audio_enable = true;
287 else
288 nds32->audio_enable = false;
289
290 return ERROR_OK;
291 }
292
293 static int nds32_set_core_reg(struct reg *reg, uint8_t *buf)
294 {
295 struct nds32_reg *reg_arch_info = reg->arch_info;
296 struct target *target = reg_arch_info->target;
297 struct nds32 *nds32 = target_to_nds32(target);
298 struct aice_port_s *aice = target_to_aice(target);
299 uint32_t value = buf_get_u32(buf, 0, 32);
300
301 if (target->state != TARGET_HALTED) {
302 LOG_ERROR("Target not halted");
303 return ERROR_TARGET_NOT_HALTED;
304 }
305
306 int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);
307
 308         /* ignore values that would generate an exception */
309 if (nds32_reg_exception(mapped_regnum, value))
310 return ERROR_OK;
311
312 LOG_DEBUG("writing register %" PRIi32 "(%s) with value 0x%8.8" PRIx32,
313 reg_arch_info->num, reg->name, value);
314
315 if ((nds32->fpu_enable == false) &&
316 (NDS32_REG_TYPE_FPU == nds32_reg_type(mapped_regnum))) {
317
318 buf_set_u32(reg->value, 0, 32, 0);
319 } else if ((nds32->audio_enable == false) &&
320 (NDS32_REG_TYPE_AUMR == nds32_reg_type(mapped_regnum))) {
321
322 buf_set_u32(reg->value, 0, 32, 0);
323 } else {
324 buf_set_u32(reg->value, 0, 32, value);
325 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
326 aice_write_register(aice, mapped_regnum, val);
327
 328                 /* After writing the value, read it back from the target to keep
 329                  * the cache consistent with W1C (write-1-to-clear) bits. */
330 aice_read_register(aice, mapped_regnum, &val);
331 buf_set_u32(reg_arch_info->value, 0, 32, val);
332 }
333
334 reg->valid = true;
335 reg->dirty = false;
336
 337         /* refresh the cached state that depends on this register so the change takes effect immediately */
338 if (IR0 == mapped_regnum) {
339 nds32_update_psw(nds32);
340 } else if (MR0 == mapped_regnum) {
341 nds32_update_mmu_info(nds32);
342 } else if ((MR6 == mapped_regnum) || (MR7 == mapped_regnum)) {
343 /* update lm information */
344 nds32_update_lm_info(nds32);
345 } else if (MR8 == mapped_regnum) {
346 nds32_update_cache_info(nds32);
347 } else if (FUCPR == mapped_regnum) {
348 /* update audio/fpu setting */
349 nds32_check_extension(nds32);
350 }
351
352 return ERROR_OK;
353 }
354
355 static int nds32_set_core_reg_64(struct reg *reg, uint8_t *buf)
356 {
357 struct nds32_reg *reg_arch_info = reg->arch_info;
358 struct target *target = reg_arch_info->target;
359 struct nds32 *nds32 = target_to_nds32(target);
360 uint32_t low_part = buf_get_u32(buf, 0, 32);
361 uint32_t high_part = buf_get_u32(buf, 32, 32);
362
363 if (target->state != TARGET_HALTED) {
364 LOG_ERROR("Target not halted");
365 return ERROR_TARGET_NOT_HALTED;
366 }
367
368 if ((nds32->fpu_enable == false) &&
369 ((FD0 <= reg_arch_info->num) && (reg_arch_info->num <= FD31))) {
370
371 buf_set_u32(reg->value, 0, 32, 0);
372 buf_set_u32(reg->value, 32, 32, 0);
373
374 reg->valid = true;
375 reg->dirty = false;
376 } else {
377 buf_set_u32(reg->value, 0, 32, low_part);
378 buf_set_u32(reg->value, 32, 32, high_part);
379
380 reg->valid = true;
381 reg->dirty = true;
382 }
383
384 return ERROR_OK;
385 }
386
387 static const struct reg_arch_type nds32_reg_access_type = {
388 .get = nds32_get_core_reg,
389 .set = nds32_set_core_reg,
390 };
391
392 static const struct reg_arch_type nds32_reg_access_type_64 = {
393 .get = nds32_get_core_reg_64,
394 .set = nds32_set_core_reg_64,
395 };
396
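/* Build the register cache backing GDB's target description: each register
 * gets an nds32_reg arch_info entry, a data type, a group and a GDB feature
 * name; the 64-bit FD0-FD31 registers use the 64-bit access functions. */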
397 static struct reg_cache *nds32_build_reg_cache(struct target *target,
398 struct nds32 *nds32)
399 {
400 struct reg_cache *cache = calloc(sizeof(struct reg_cache), 1);
401 struct reg *reg_list = calloc(TOTAL_REG_NUM, sizeof(struct reg));
402 struct nds32_reg *reg_arch_info = calloc(TOTAL_REG_NUM, sizeof(struct nds32_reg));
403 int i;
404
405 if (!cache || !reg_list || !reg_arch_info) {
406 free(cache);
407 free(reg_list);
408 free(reg_arch_info);
409 return NULL;
410 }
411
412 cache->name = "Andes registers";
413 cache->next = NULL;
414 cache->reg_list = reg_list;
415 cache->num_regs = 0;
416
417 for (i = 0; i < TOTAL_REG_NUM; i++) {
418 reg_arch_info[i].num = i;
419 reg_arch_info[i].target = target;
420 reg_arch_info[i].nds32 = nds32;
421 reg_arch_info[i].enable = false;
422
423 reg_list[i].name = nds32_reg_simple_name(i);
424 reg_list[i].number = reg_arch_info[i].num;
425 reg_list[i].size = nds32_reg_size(i);
426 reg_list[i].arch_info = &reg_arch_info[i];
427
428 reg_list[i].reg_data_type = calloc(sizeof(struct reg_data_type), 1);
429
430 if (FD0 <= reg_arch_info[i].num && reg_arch_info[i].num <= FD31) {
431 reg_list[i].value = reg_arch_info[i].value;
432 reg_list[i].type = &nds32_reg_access_type_64;
433
434 reg_list[i].reg_data_type->type = REG_TYPE_IEEE_DOUBLE;
435 reg_list[i].reg_data_type->id = "ieee_double";
436 reg_list[i].group = "float";
437 } else {
438 reg_list[i].value = reg_arch_info[i].value;
439 reg_list[i].type = &nds32_reg_access_type;
440 reg_list[i].group = "general";
441
442 if ((FS0 <= reg_arch_info[i].num) && (reg_arch_info[i].num <= FS31)) {
443 reg_list[i].reg_data_type->type = REG_TYPE_IEEE_SINGLE;
444 reg_list[i].reg_data_type->id = "ieee_single";
445 reg_list[i].group = "float";
446 } else if ((reg_arch_info[i].num == FPCSR) ||
447 (reg_arch_info[i].num == FPCFG)) {
448 reg_list[i].group = "float";
449 } else if ((reg_arch_info[i].num == R28) ||
450 (reg_arch_info[i].num == R29) ||
451 (reg_arch_info[i].num == R31)) {
452 reg_list[i].reg_data_type->type = REG_TYPE_DATA_PTR;
453 reg_list[i].reg_data_type->id = "data_ptr";
454 } else if ((reg_arch_info[i].num == R30) ||
455 (reg_arch_info[i].num == PC)) {
456 reg_list[i].reg_data_type->type = REG_TYPE_CODE_PTR;
457 reg_list[i].reg_data_type->id = "code_ptr";
458 } else {
459 reg_list[i].reg_data_type->type = REG_TYPE_UINT32;
460 reg_list[i].reg_data_type->id = "uint32";
461 }
462 }
463
464 if (R16 <= reg_arch_info[i].num && reg_arch_info[i].num <= R25)
465 reg_list[i].caller_save = true;
466 else
467 reg_list[i].caller_save = false;
468
469 reg_list[i].feature = malloc(sizeof(struct reg_feature));
470
471 if (R0 <= reg_arch_info[i].num && reg_arch_info[i].num <= IFC_LP)
472 reg_list[i].feature->name = "org.gnu.gdb.nds32.core";
473 else if (CR0 <= reg_arch_info[i].num && reg_arch_info[i].num <= SECUR0)
474 reg_list[i].feature->name = "org.gnu.gdb.nds32.system";
475 else if (D0L24 <= reg_arch_info[i].num && reg_arch_info[i].num <= CBE3)
476 reg_list[i].feature->name = "org.gnu.gdb.nds32.audio";
477 else if (FPCSR <= reg_arch_info[i].num && reg_arch_info[i].num <= FD31)
478 reg_list[i].feature->name = "org.gnu.gdb.nds32.fpu";
479
480 cache->num_regs++;
481 }
482
483 nds32->core_cache = cache;
484
485 return cache;
486 }
487
488 static int nds32_reg_cache_init(struct target *target, struct nds32 *nds32)
489 {
490 struct reg_cache *cache;
491
492 cache = nds32_build_reg_cache(target, nds32);
493 if (!cache)
494 return ERROR_FAIL;
495
496 *register_get_last_cache_p(&target->reg_cache) = cache;
497
498 return ERROR_OK;
499 }
500
501 static struct reg *nds32_reg_current(struct nds32 *nds32, unsigned regnum)
502 {
503 struct reg *r;
504
505 r = nds32->core_cache->reg_list + regnum;
506
507 return r;
508 }
509
510 int nds32_full_context(struct nds32 *nds32)
511 {
512 uint32_t value, value_ir0;
513
514 /* save $pc & $psw */
515 nds32_get_mapped_reg(nds32, PC, &value);
516 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
517
518 nds32_update_psw(nds32);
519 nds32_update_mmu_info(nds32);
520 nds32_update_cache_info(nds32);
521 nds32_update_lm_info(nds32);
522
523 nds32_check_extension(nds32);
524
525 return ERROR_OK;
526 }
527
 528 /** get register value internally */
529 int nds32_get_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t *value)
530 {
531 struct reg_cache *reg_cache = nds32->core_cache;
532 struct reg *r;
533
534 if (regnum > reg_cache->num_regs)
535 return ERROR_FAIL;
536
537 r = nds32_reg_current(nds32, regnum);
538
539 if (ERROR_OK != r->type->get(r))
540 return ERROR_FAIL;
541
542 *value = buf_get_u32(r->value, 0, 32);
543
544 return ERROR_OK;
545 }
546
547 /** set register internally */
548 int nds32_set_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t value)
549 {
550 struct reg_cache *reg_cache = nds32->core_cache;
551 struct reg *r;
552 uint8_t set_value[4];
553
554 if (regnum > reg_cache->num_regs)
555 return ERROR_FAIL;
556
557 r = nds32_reg_current(nds32, regnum);
558
559 buf_set_u32(set_value, 0, 32, value);
560
561 return r->type->set(r, set_value);
562 }
563
564 /** get general register list */
565 static int nds32_get_general_reg_list(struct nds32 *nds32,
566 struct reg **reg_list[], int *reg_list_size)
567 {
568 struct reg *reg_current;
569 int i;
570 int current_idx;
571
572 /** freed in gdb_server.c */
573 *reg_list = malloc(sizeof(struct reg *) * (IFC_LP - R0 + 1));
574 current_idx = 0;
575
576 for (i = R0; i < IFC_LP + 1; i++) {
577 reg_current = nds32_reg_current(nds32, i);
578 if (((struct nds32_reg *)reg_current->arch_info)->enable) {
579 (*reg_list)[current_idx] = reg_current;
580 current_idx++;
581 }
582 }
583 *reg_list_size = current_idx;
584
585 return ERROR_OK;
586 }
587
588 /** get all register list */
589 static int nds32_get_all_reg_list(struct nds32 *nds32,
590 struct reg **reg_list[], int *reg_list_size)
591 {
592 struct reg_cache *reg_cache = nds32->core_cache;
593 struct reg *reg_current;
594 unsigned int i;
595
596 *reg_list_size = reg_cache->num_regs;
597
598 /** freed in gdb_server.c */
599 *reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));
600
601 for (i = 0; i < reg_cache->num_regs; i++) {
602 reg_current = nds32_reg_current(nds32, i);
603 reg_current->exist = ((struct nds32_reg *)
604 reg_current->arch_info)->enable;
605 (*reg_list)[i] = reg_current;
606 }
607
608 return ERROR_OK;
609 }
610
 611 /** get register list for GDB, selected by register class */
612 int nds32_get_gdb_reg_list(struct target *target,
613 struct reg **reg_list[], int *reg_list_size,
614 enum target_register_class reg_class)
615 {
616 struct nds32 *nds32 = target_to_nds32(target);
617
618 switch (reg_class) {
619 case REG_CLASS_ALL:
620 return nds32_get_all_reg_list(nds32, reg_list, reg_list_size);
621 case REG_CLASS_GENERAL:
622 return nds32_get_general_reg_list(nds32, reg_list, reg_list_size);
623 default:
624 return ERROR_FAIL;
625 }
626
627 return ERROR_FAIL;
628 }
629
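/* Pick the AICE memory-select mode (MEM/ILM/DLM) for an access starting at
 * 'address'. *end_address starts as address + length; when automatic mode
 * selection applies and the range crosses an ILM/DLM boundary, it is clipped
 * to that boundary so the caller can split the transfer there. */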
630 static int nds32_select_memory_mode(struct target *target, uint32_t address,
631 uint32_t length, uint32_t *end_address)
632 {
633 struct nds32 *nds32 = target_to_nds32(target);
634 struct aice_port_s *aice = target_to_aice(target);
635 struct nds32_memory *memory = &(nds32->memory);
636 struct nds32_edm *edm = &(nds32->edm);
637 uint32_t dlm_start, dlm_end;
638 uint32_t ilm_start, ilm_end;
639 uint32_t address_end = address + length;
640
641 /* init end_address */
642 *end_address = address_end;
643
644 if (NDS_MEMORY_ACC_CPU == memory->access_channel)
645 return ERROR_OK;
646
647 if (edm->access_control == false) {
648 LOG_DEBUG("EDM does not support ACC_CTL");
649 return ERROR_OK;
650 }
651
652 if (edm->direct_access_local_memory == false) {
653 LOG_DEBUG("EDM does not support DALM");
654 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
655 return ERROR_OK;
656 }
657
658 if (NDS_MEMORY_SELECT_AUTO != memory->mode) {
659 LOG_DEBUG("Memory mode is not AUTO");
660 return ERROR_OK;
661 }
662
663 /* set default mode */
664 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
665
666 if ((memory->ilm_base != 0) && (memory->ilm_enable == true)) {
667 ilm_start = memory->ilm_start;
668 ilm_end = memory->ilm_end;
669
670 /* case 1, address < ilm_start */
671 if (address < ilm_start) {
672 if (ilm_start < address_end) {
673 /* update end_address to split non-ILM from ILM */
674 *end_address = ilm_start;
675 }
676 /* MEM mode */
677 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
678 } else if ((ilm_start <= address) && (address < ilm_end)) {
679 /* case 2, ilm_start <= address < ilm_end */
680 if (ilm_end < address_end) {
681 /* update end_address to split non-ILM from ILM */
682 *end_address = ilm_end;
683 }
684 /* ILM mode */
685 aice_memory_mode(aice, NDS_MEMORY_SELECT_ILM);
686 } else { /* case 3, ilm_end <= address */
687 /* MEM mode */
688 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
689 }
690
691 return ERROR_OK;
692 } else {
693 LOG_DEBUG("ILM is not enabled");
694 }
695
696 if ((memory->dlm_base != 0) && (memory->dlm_enable == true)) {
697 dlm_start = memory->dlm_start;
698 dlm_end = memory->dlm_end;
699
700 /* case 1, address < dlm_start */
701 if (address < dlm_start) {
702 if (dlm_start < address_end) {
703 /* update end_address to split non-DLM from DLM */
704 *end_address = dlm_start;
705 }
706 /* MEM mode */
707 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
708 } else if ((dlm_start <= address) && (address < dlm_end)) {
709 /* case 2, dlm_start <= address < dlm_end */
710 if (dlm_end < address_end) {
711 /* update end_address to split non-DLM from DLM */
712 *end_address = dlm_end;
713 }
714 /* DLM mode */
715 aice_memory_mode(aice, NDS_MEMORY_SELECT_DLM);
716 } else { /* case 3, dlm_end <= address */
717 /* MEM mode */
718 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
719 }
720
721 return ERROR_OK;
722 } else {
723 LOG_DEBUG("DLM is not enabled");
724 }
725
726 return ERROR_OK;
727 }
728
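/* Read 'size' bytes using the widest transfers alignment allows: unaligned
 * head bytes, then word (or bulk) accesses split at ILM/DLM boundaries by
 * nds32_select_memory_mode(), then halfword and byte tails. */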
729 int nds32_read_buffer(struct target *target, uint32_t address,
730 uint32_t size, uint8_t *buffer)
731 {
732 struct nds32 *nds32 = target_to_nds32(target);
733 struct nds32_memory *memory = &(nds32->memory);
734
735 if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
736 (target->state != TARGET_HALTED)) {
737 LOG_WARNING("target was not halted");
738 return ERROR_TARGET_NOT_HALTED;
739 }
740
741 LOG_DEBUG("READ BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
742 address,
743 size);
744
745 int retval = ERROR_OK;
746 struct aice_port_s *aice = target_to_aice(target);
747 uint32_t end_address;
748
749 if (((address % 2) == 0) && (size == 2)) {
750 nds32_select_memory_mode(target, address, 2, &end_address);
751 return aice_read_mem_unit(aice, address, 2, 1, buffer);
752 }
753
754 /* handle unaligned head bytes */
755 if (address % 4) {
756 uint32_t unaligned = 4 - (address % 4);
757
758 if (unaligned > size)
759 unaligned = size;
760
761 nds32_select_memory_mode(target, address, unaligned, &end_address);
762 retval = aice_read_mem_unit(aice, address, 1, unaligned, buffer);
763 if (retval != ERROR_OK)
764 return retval;
765
766 buffer += unaligned;
767 address += unaligned;
768 size -= unaligned;
769 }
770
771 /* handle aligned words */
772 if (size >= 4) {
773 int aligned = size - (size % 4);
774 int read_len;
775
776 do {
777 nds32_select_memory_mode(target, address, aligned, &end_address);
778
779 read_len = end_address - address;
780
781 if (read_len > 8)
782 retval = aice_read_mem_bulk(aice, address, read_len, buffer);
783 else
784 retval = aice_read_mem_unit(aice, address, 4, read_len / 4, buffer);
785
786 if (retval != ERROR_OK)
787 return retval;
788
789 buffer += read_len;
790 address += read_len;
791 size -= read_len;
792 aligned -= read_len;
793
794 } while (aligned != 0);
795 }
796
 797         /* prevent byte access when possible (avoid AHB access limitations in some cases) */
798 if (size >= 2) {
799 int aligned = size - (size % 2);
800 nds32_select_memory_mode(target, address, aligned, &end_address);
801 retval = aice_read_mem_unit(aice, address, 2, aligned / 2, buffer);
802 if (retval != ERROR_OK)
803 return retval;
804
805 buffer += aligned;
806 address += aligned;
807 size -= aligned;
808 }
 809         /* handle tail reads of less than 4 bytes */
810 if (size > 0) {
811 nds32_select_memory_mode(target, address, size, &end_address);
812 retval = aice_read_mem_unit(aice, address, 1, size, buffer);
813 if (retval != ERROR_OK)
814 return retval;
815 }
816
817 return ERROR_OK;
818 }
819
820 int nds32_read_memory(struct target *target, uint32_t address,
821 uint32_t size, uint32_t count, uint8_t *buffer)
822 {
823 struct aice_port_s *aice = target_to_aice(target);
824
825 return aice_read_mem_unit(aice, address, size, count, buffer);
826 }
827
828 int nds32_read_phys_memory(struct target *target, uint32_t address,
829 uint32_t size, uint32_t count, uint8_t *buffer)
830 {
831 struct aice_port_s *aice = target_to_aice(target);
832 struct nds32 *nds32 = target_to_nds32(target);
833 struct nds32_memory *memory = &(nds32->memory);
834 enum nds_memory_access orig_channel;
835 int result;
836
837 /* switch to BUS access mode to skip MMU */
838 orig_channel = memory->access_channel;
839 memory->access_channel = NDS_MEMORY_ACC_BUS;
840 aice_memory_access(aice, memory->access_channel);
841
 842         /* The input address is a physical address; no address translation is needed. */
843 result = aice_read_mem_unit(aice, address, size, count, buffer);
844
 845         /* restore the original access mode */
846 memory->access_channel = orig_channel;
847 aice_memory_access(aice, memory->access_channel);
848
849 return result;
850 }
851
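/* Write counterpart of nds32_read_buffer(): unaligned head bytes, then word
 * (or bulk, for chunks over 8 bytes) writes split at ILM/DLM boundaries,
 * then a byte tail of up to 3 bytes. */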
852 int nds32_write_buffer(struct target *target, uint32_t address,
853 uint32_t size, const uint8_t *buffer)
854 {
855 struct nds32 *nds32 = target_to_nds32(target);
856 struct nds32_memory *memory = &(nds32->memory);
857
858 if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
859 (target->state != TARGET_HALTED)) {
860 LOG_WARNING("target was not halted");
861 return ERROR_TARGET_NOT_HALTED;
862 }
863
864 LOG_DEBUG("WRITE BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
865 address,
866 size);
867
868 struct aice_port_s *aice = target_to_aice(target);
869 int retval = ERROR_OK;
870 uint32_t end_address;
871
872 if (((address % 2) == 0) && (size == 2)) {
873 nds32_select_memory_mode(target, address, 2, &end_address);
874 return aice_write_mem_unit(aice, address, 2, 1, buffer);
875 }
876
877 /* handle unaligned head bytes */
878 if (address % 4) {
879 uint32_t unaligned = 4 - (address % 4);
880
881 if (unaligned > size)
882 unaligned = size;
883
884 nds32_select_memory_mode(target, address, unaligned, &end_address);
885 retval = aice_write_mem_unit(aice, address, 1, unaligned, buffer);
886 if (retval != ERROR_OK)
887 return retval;
888
889 buffer += unaligned;
890 address += unaligned;
891 size -= unaligned;
892 }
893
894 /* handle aligned words */
895 if (size >= 4) {
896 int aligned = size - (size % 4);
897 int write_len;
898
899 do {
900 nds32_select_memory_mode(target, address, aligned, &end_address);
901
902 write_len = end_address - address;
903 if (write_len > 8)
904 retval = aice_write_mem_bulk(aice, address, write_len, buffer);
905 else
906 retval = aice_write_mem_unit(aice, address, 4, write_len / 4, buffer);
907 if (retval != ERROR_OK)
908 return retval;
909
910 buffer += write_len;
911 address += write_len;
912 size -= write_len;
913 aligned -= write_len;
914
915 } while (aligned != 0);
916 }
917
918 /* handle tail writes of less than 4 bytes */
919 if (size > 0) {
920 nds32_select_memory_mode(target, address, size, &end_address);
921 retval = aice_write_mem_unit(aice, address, 1, size, buffer);
922 if (retval != ERROR_OK)
923 return retval;
924 }
925
926 return retval;
927 }
928
929 int nds32_write_memory(struct target *target, uint32_t address,
930 uint32_t size, uint32_t count, const uint8_t *buffer)
931 {
932 struct aice_port_s *aice = target_to_aice(target);
933
934 return aice_write_mem_unit(aice, address, size, count, buffer);
935 }
936
937 int nds32_write_phys_memory(struct target *target, uint32_t address,
938 uint32_t size, uint32_t count, const uint8_t *buffer)
939 {
940 struct aice_port_s *aice = target_to_aice(target);
941 struct nds32 *nds32 = target_to_nds32(target);
942 struct nds32_memory *memory = &(nds32->memory);
943 enum nds_memory_access orig_channel;
944 int result;
945
946 /* switch to BUS access mode to skip MMU */
947 orig_channel = memory->access_channel;
948 memory->access_channel = NDS_MEMORY_ACC_BUS;
949 aice_memory_access(aice, memory->access_channel);
950
 951         /* The input address is a physical address; no address translation is needed. */
952 result = aice_write_mem_unit(aice, address, size, count, buffer);
953
 954         /* restore the original access mode */
955 memory->access_channel = orig_channel;
956 aice_memory_access(aice, memory->access_channel);
957
958 return result;
959 }
960
961 int nds32_mmu(struct target *target, int *enabled)
962 {
963 if (target->state != TARGET_HALTED) {
964 LOG_ERROR("%s: target not halted", __func__);
965 return ERROR_TARGET_INVALID;
966 }
967
968 struct nds32 *nds32 = target_to_nds32(target);
969 struct nds32_memory *memory = &(nds32->memory);
970 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
971
972 if ((mmu_config->memory_protection == 2) && (memory->address_translation == true))
973 *enabled = 1;
974 else
975 *enabled = 0;
976
977 return ERROR_OK;
978 }
979
980 int nds32_arch_state(struct target *target)
981 {
982 struct nds32 *nds32 = target_to_nds32(target);
983
984 if (nds32->common_magic != NDS32_COMMON_MAGIC) {
985 LOG_ERROR("BUG: called for a non-Andes target");
986 return ERROR_FAIL;
987 }
988
989 uint32_t value_pc, value_psw;
990
991 nds32_get_mapped_reg(nds32, PC, &value_pc);
992 nds32_get_mapped_reg(nds32, IR0, &value_psw);
993
994 LOG_USER("target halted due to %s\n"
995 "psw: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
996 debug_reason_name(target),
997 value_psw,
998 value_pc,
999 nds32->virtual_hosting ? ", virtual hosting" : "");
1000
1001 /* save pc value to pseudo register pc */
1002 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
1003 buf_set_u32(reg->value, 0, 32, value_pc);
1004
1005 return ERROR_OK;
1006 }
1007
1008 static void nds32_init_must_have_registers(struct nds32 *nds32)
1009 {
1010 struct reg_cache *reg_cache = nds32->core_cache;
1011
1012 /** MUST have general registers */
1013 ((struct nds32_reg *)reg_cache->reg_list[R0].arch_info)->enable = true;
1014 ((struct nds32_reg *)reg_cache->reg_list[R1].arch_info)->enable = true;
1015 ((struct nds32_reg *)reg_cache->reg_list[R2].arch_info)->enable = true;
1016 ((struct nds32_reg *)reg_cache->reg_list[R3].arch_info)->enable = true;
1017 ((struct nds32_reg *)reg_cache->reg_list[R4].arch_info)->enable = true;
1018 ((struct nds32_reg *)reg_cache->reg_list[R5].arch_info)->enable = true;
1019 ((struct nds32_reg *)reg_cache->reg_list[R6].arch_info)->enable = true;
1020 ((struct nds32_reg *)reg_cache->reg_list[R7].arch_info)->enable = true;
1021 ((struct nds32_reg *)reg_cache->reg_list[R8].arch_info)->enable = true;
1022 ((struct nds32_reg *)reg_cache->reg_list[R9].arch_info)->enable = true;
1023 ((struct nds32_reg *)reg_cache->reg_list[R10].arch_info)->enable = true;
1024 ((struct nds32_reg *)reg_cache->reg_list[R15].arch_info)->enable = true;
1025 ((struct nds32_reg *)reg_cache->reg_list[R28].arch_info)->enable = true;
1026 ((struct nds32_reg *)reg_cache->reg_list[R29].arch_info)->enable = true;
1027 ((struct nds32_reg *)reg_cache->reg_list[R30].arch_info)->enable = true;
1028 ((struct nds32_reg *)reg_cache->reg_list[R31].arch_info)->enable = true;
1029 ((struct nds32_reg *)reg_cache->reg_list[PC].arch_info)->enable = true;
1030
1031 /** MUST have configuration system registers */
1032 ((struct nds32_reg *)reg_cache->reg_list[CR0].arch_info)->enable = true;
1033 ((struct nds32_reg *)reg_cache->reg_list[CR1].arch_info)->enable = true;
1034 ((struct nds32_reg *)reg_cache->reg_list[CR2].arch_info)->enable = true;
1035 ((struct nds32_reg *)reg_cache->reg_list[CR3].arch_info)->enable = true;
1036 ((struct nds32_reg *)reg_cache->reg_list[CR4].arch_info)->enable = true;
1037
1038 /** MUST have interrupt system registers */
1039 ((struct nds32_reg *)reg_cache->reg_list[IR0].arch_info)->enable = true;
1040 ((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = true;
1041 ((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = true;
1042 ((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = true;
1043 ((struct nds32_reg *)reg_cache->reg_list[IR6].arch_info)->enable = true;
1044 ((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = true;
1045 ((struct nds32_reg *)reg_cache->reg_list[IR11].arch_info)->enable = true;
1046 ((struct nds32_reg *)reg_cache->reg_list[IR14].arch_info)->enable = true;
1047 ((struct nds32_reg *)reg_cache->reg_list[IR15].arch_info)->enable = true;
1048
1049 /** MUST have MMU system registers */
1050 ((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = true;
1051
1052 /** MUST have EDM system registers */
1053 ((struct nds32_reg *)reg_cache->reg_list[DR40].arch_info)->enable = true;
1054 ((struct nds32_reg *)reg_cache->reg_list[DR42].arch_info)->enable = true;
1055 }
1056
1057 static int nds32_init_memory_config(struct nds32 *nds32)
1058 {
1059 uint32_t value_cr1; /* ICM_CFG */
1060 uint32_t value_cr2; /* DCM_CFG */
1061 struct nds32_memory *memory = &(nds32->memory);
1062
1063 /* read $cr1 to init instruction memory information */
1064 nds32_get_mapped_reg(nds32, CR1, &value_cr1);
1065 memory->icache.set = value_cr1 & 0x7;
1066 memory->icache.way = (value_cr1 >> 3) & 0x7;
1067 memory->icache.line_size = (value_cr1 >> 6) & 0x7;
1068 memory->icache.lock_support = (value_cr1 >> 9) & 0x1;
1069
1070 memory->ilm_base = (value_cr1 >> 10) & 0x7;
1071 memory->ilm_align_ver = (value_cr1 >> 13) & 0x3;
1072
1073 /* read $cr2 to init data memory information */
1074 nds32_get_mapped_reg(nds32, CR2, &value_cr2);
1075 memory->dcache.set = value_cr2 & 0x7;
1076 memory->dcache.way = (value_cr2 >> 3) & 0x7;
1077 memory->dcache.line_size = (value_cr2 >> 6) & 0x7;
1078 memory->dcache.lock_support = (value_cr2 >> 9) & 0x1;
1079
1080 memory->dlm_base = (value_cr2 >> 10) & 0x7;
1081 memory->dlm_align_ver = (value_cr2 >> 13) & 0x3;
1082
1083 return ERROR_OK;
1084 }
1085
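/* Decode $cr0, $cr3 and $cr4 into the cpu_version, mmu_config and
 * misc_config structures, then pull in the memory configuration. */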
1086 static void nds32_init_config(struct nds32 *nds32)
1087 {
1088 uint32_t value_cr0;
1089 uint32_t value_cr3;
1090 uint32_t value_cr4;
1091 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
1092 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
1093 struct nds32_misc_config *misc_config = &(nds32->misc_config);
1094
1095 nds32_get_mapped_reg(nds32, CR0, &value_cr0);
1096 nds32_get_mapped_reg(nds32, CR3, &value_cr3);
1097 nds32_get_mapped_reg(nds32, CR4, &value_cr4);
1098
1099 /* config cpu version */
1100 cpu_version->performance_extension = value_cr0 & 0x1;
1101 cpu_version->_16bit_extension = (value_cr0 >> 1) & 0x1;
1102 cpu_version->performance_extension_2 = (value_cr0 >> 2) & 0x1;
1103 cpu_version->cop_fpu_extension = (value_cr0 >> 3) & 0x1;
1104 cpu_version->string_extension = (value_cr0 >> 4) & 0x1;
1105 cpu_version->revision = (value_cr0 >> 16) & 0xFF;
1106 cpu_version->cpu_id_family = (value_cr0 >> 24) & 0xF;
1107 cpu_version->cpu_id_version = (value_cr0 >> 28) & 0xF;
1108
1109 /* config MMU */
1110 mmu_config->memory_protection = value_cr3 & 0x3;
1111 mmu_config->memory_protection_version = (value_cr3 >> 2) & 0x1F;
1112 mmu_config->fully_associative_tlb = (value_cr3 >> 7) & 0x1;
1113 if (mmu_config->fully_associative_tlb) {
1114 mmu_config->tlb_size = (value_cr3 >> 8) & 0x7F;
1115 } else {
1116 mmu_config->tlb_ways = (value_cr3 >> 8) & 0x7;
1117 mmu_config->tlb_sets = (value_cr3 >> 11) & 0x7;
1118 }
1119 mmu_config->_8k_page_support = (value_cr3 >> 15) & 0x1;
1120 mmu_config->extra_page_size_support = (value_cr3 >> 16) & 0xFF;
1121 mmu_config->tlb_lock = (value_cr3 >> 24) & 0x1;
1122 mmu_config->hardware_page_table_walker = (value_cr3 >> 25) & 0x1;
1123 mmu_config->default_endian = (value_cr3 >> 26) & 0x1;
1124 mmu_config->partition_num = (value_cr3 >> 27) & 0x1;
1125 mmu_config->invisible_tlb = (value_cr3 >> 28) & 0x1;
1126 mmu_config->vlpt = (value_cr3 >> 29) & 0x1;
1127 mmu_config->ntme = (value_cr3 >> 30) & 0x1;
1128 mmu_config->drde = (value_cr3 >> 31) & 0x1;
1129
1130 /* config misc */
1131 misc_config->edm = value_cr4 & 0x1;
1132 misc_config->local_memory_dma = (value_cr4 >> 1) & 0x1;
1133 misc_config->performance_monitor = (value_cr4 >> 2) & 0x1;
1134 misc_config->high_speed_memory_port = (value_cr4 >> 3) & 0x1;
1135 misc_config->debug_tracer = (value_cr4 >> 4) & 0x1;
1136 misc_config->div_instruction = (value_cr4 >> 5) & 0x1;
1137 misc_config->mac_instruction = (value_cr4 >> 6) & 0x1;
1138 misc_config->audio_isa = (value_cr4 >> 7) & 0x3;
1139 misc_config->L2_cache = (value_cr4 >> 9) & 0x1;
1140 misc_config->reduce_register = (value_cr4 >> 10) & 0x1;
1141 misc_config->addr_24 = (value_cr4 >> 11) & 0x1;
1142 misc_config->interruption_level = (value_cr4 >> 12) & 0x1;
1143 misc_config->baseline_instruction = (value_cr4 >> 13) & 0x7;
1144 misc_config->no_dx_register = (value_cr4 >> 16) & 0x1;
1145 misc_config->implement_dependant_register = (value_cr4 >> 17) & 0x1;
1146 misc_config->implement_dependant_sr_encoding = (value_cr4 >> 18) & 0x1;
1147 misc_config->ifc = (value_cr4 >> 19) & 0x1;
1148 misc_config->mcu = (value_cr4 >> 20) & 0x1;
1149 misc_config->shadow = (value_cr4 >> 21) & 0x7;
1150 misc_config->ex9 = (value_cr4 >> 24) & 0x1;
1151
1152 nds32_init_memory_config(nds32);
1153 }
1154
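/* Enable the optional system registers that actually exist on this core,
 * based on the configuration decoded in nds32_init_config(). */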
1155 static int nds32_init_option_registers(struct nds32 *nds32)
1156 {
1157 struct reg_cache *reg_cache = nds32->core_cache;
1158 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
1159 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
1160 struct nds32_misc_config *misc_config = &(nds32->misc_config);
1161 struct nds32_memory *memory_config = &(nds32->memory);
1162
1163 bool no_cr5;
1164 bool mr10_exist;
1165 bool no_racr0;
1166
1167 if (((cpu_version->cpu_id_family == 0xC) || (cpu_version->cpu_id_family == 0xD)) &&
1168 ((cpu_version->revision & 0xFC) == 0)) {
1169 no_cr5 = true;
1170 mr10_exist = true;
1171 no_racr0 = true;
1172 } else {
1173 no_cr5 = false;
1174 mr10_exist = false;
1175 no_racr0 = false;
1176 }
1177
1178 if (misc_config->reduce_register == false) {
1179 ((struct nds32_reg *)reg_cache->reg_list[R11].arch_info)->enable = true;
1180 ((struct nds32_reg *)reg_cache->reg_list[R12].arch_info)->enable = true;
1181 ((struct nds32_reg *)reg_cache->reg_list[R13].arch_info)->enable = true;
1182 ((struct nds32_reg *)reg_cache->reg_list[R14].arch_info)->enable = true;
1183 ((struct nds32_reg *)reg_cache->reg_list[R16].arch_info)->enable = true;
1184 ((struct nds32_reg *)reg_cache->reg_list[R17].arch_info)->enable = true;
1185 ((struct nds32_reg *)reg_cache->reg_list[R18].arch_info)->enable = true;
1186 ((struct nds32_reg *)reg_cache->reg_list[R19].arch_info)->enable = true;
1187 ((struct nds32_reg *)reg_cache->reg_list[R20].arch_info)->enable = true;
1188 ((struct nds32_reg *)reg_cache->reg_list[R21].arch_info)->enable = true;
1189 ((struct nds32_reg *)reg_cache->reg_list[R22].arch_info)->enable = true;
1190 ((struct nds32_reg *)reg_cache->reg_list[R23].arch_info)->enable = true;
1191 ((struct nds32_reg *)reg_cache->reg_list[R24].arch_info)->enable = true;
1192 ((struct nds32_reg *)reg_cache->reg_list[R25].arch_info)->enable = true;
1193 ((struct nds32_reg *)reg_cache->reg_list[R26].arch_info)->enable = true;
1194 ((struct nds32_reg *)reg_cache->reg_list[R27].arch_info)->enable = true;
1195 }
1196
1197 if (misc_config->no_dx_register == false) {
1198 ((struct nds32_reg *)reg_cache->reg_list[D0LO].arch_info)->enable = true;
1199 ((struct nds32_reg *)reg_cache->reg_list[D0HI].arch_info)->enable = true;
1200 ((struct nds32_reg *)reg_cache->reg_list[D1LO].arch_info)->enable = true;
1201 ((struct nds32_reg *)reg_cache->reg_list[D1HI].arch_info)->enable = true;
1202 }
1203
1204 if (misc_config->ex9)
1205 ((struct nds32_reg *)reg_cache->reg_list[ITB].arch_info)->enable = true;
1206
1207 if (no_cr5 == false)
1208 ((struct nds32_reg *)reg_cache->reg_list[CR5].arch_info)->enable = true;
1209
1210 if (cpu_version->cop_fpu_extension) {
1211 ((struct nds32_reg *)reg_cache->reg_list[CR6].arch_info)->enable = true;
1212 ((struct nds32_reg *)reg_cache->reg_list[FPCSR].arch_info)->enable = true;
1213 ((struct nds32_reg *)reg_cache->reg_list[FPCFG].arch_info)->enable = true;
1214 }
1215
1216 if (mmu_config->memory_protection == 1) {
1217 /* Secure MPU has no IPC, IPSW, P_ITYPE */
1218 ((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = false;
1219 ((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = false;
1220 }
1221
1222 if (nds32->privilege_level != 0)
1223 ((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = false;
1224
1225 if (misc_config->mcu == true)
1226 ((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = false;
1227
1228 if (misc_config->interruption_level == false) {
1229 ((struct nds32_reg *)reg_cache->reg_list[IR2].arch_info)->enable = true;
1230 ((struct nds32_reg *)reg_cache->reg_list[IR5].arch_info)->enable = true;
1231 ((struct nds32_reg *)reg_cache->reg_list[IR10].arch_info)->enable = true;
1232 ((struct nds32_reg *)reg_cache->reg_list[IR12].arch_info)->enable = true;
1233 ((struct nds32_reg *)reg_cache->reg_list[IR13].arch_info)->enable = true;
1234
1235 /* Secure MPU has no IPC, IPSW, P_ITYPE */
1236 if (mmu_config->memory_protection != 1)
1237 ((struct nds32_reg *)reg_cache->reg_list[IR7].arch_info)->enable = true;
1238 }
1239
1240 if ((cpu_version->cpu_id_family == 0x9) ||
1241 (cpu_version->cpu_id_family == 0xA) ||
1242 (cpu_version->cpu_id_family == 0xC) ||
1243 (cpu_version->cpu_id_family == 0xD))
1244 ((struct nds32_reg *)reg_cache->reg_list[IR8].arch_info)->enable = true;
1245
1246 if (misc_config->shadow == 1) {
1247 ((struct nds32_reg *)reg_cache->reg_list[IR16].arch_info)->enable = true;
1248 ((struct nds32_reg *)reg_cache->reg_list[IR17].arch_info)->enable = true;
1249 }
1250
1251 if (misc_config->ifc)
1252 ((struct nds32_reg *)reg_cache->reg_list[IFC_LP].arch_info)->enable = true;
1253
1254 if (nds32->privilege_level != 0)
1255 ((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = false;
1256
1257 if (mmu_config->memory_protection == 1) {
1258 if (mmu_config->memory_protection_version == 24)
1259 ((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;
1260
1261 if (nds32->privilege_level == 0) {
1262 if ((mmu_config->memory_protection_version == 16) ||
1263 (mmu_config->memory_protection_version == 24)) {
1264 ((struct nds32_reg *)reg_cache->reg_list[MR11].arch_info)->enable = true;
1265 ((struct nds32_reg *)reg_cache->reg_list[SECUR0].arch_info)->enable = true;
1266 ((struct nds32_reg *)reg_cache->reg_list[IR20].arch_info)->enable = true;
1267 ((struct nds32_reg *)reg_cache->reg_list[IR22].arch_info)->enable = true;
1268 ((struct nds32_reg *)reg_cache->reg_list[IR24].arch_info)->enable = true;
1269 ((struct nds32_reg *)reg_cache->reg_list[IR30].arch_info)->enable = true;
1270
1271 if (misc_config->shadow == 1) {
1272 ((struct nds32_reg *)reg_cache->reg_list[IR21].arch_info)->enable = true;
1273 ((struct nds32_reg *)reg_cache->reg_list[IR23].arch_info)->enable = true;
1274 ((struct nds32_reg *)reg_cache->reg_list[IR25].arch_info)->enable = true;
1275 }
1276 }
1277 }
1278 } else if (mmu_config->memory_protection == 2) {
1279 ((struct nds32_reg *)reg_cache->reg_list[MR1].arch_info)->enable = true;
1280 ((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;
1281
1282 if ((cpu_version->cpu_id_family != 0xA) && (cpu_version->cpu_id_family != 0xC) &&
1283 (cpu_version->cpu_id_family != 0xD))
1284 ((struct nds32_reg *)reg_cache->reg_list[MR5].arch_info)->enable = true;
1285 }
1286
1287 if (mmu_config->memory_protection > 0) {
1288 ((struct nds32_reg *)reg_cache->reg_list[MR2].arch_info)->enable = true;
1289 ((struct nds32_reg *)reg_cache->reg_list[MR3].arch_info)->enable = true;
1290 }
1291
1292 if (memory_config->ilm_base != 0)
1293 if (nds32->privilege_level == 0)
1294 ((struct nds32_reg *)reg_cache->reg_list[MR6].arch_info)->enable = true;
1295
1296 if (memory_config->dlm_base != 0)
1297 if (nds32->privilege_level == 0)
1298 ((struct nds32_reg *)reg_cache->reg_list[MR7].arch_info)->enable = true;
1299
1300 if ((memory_config->icache.line_size != 0) && (memory_config->dcache.line_size != 0))
1301 ((struct nds32_reg *)reg_cache->reg_list[MR8].arch_info)->enable = true;
1302
1303 if (misc_config->high_speed_memory_port)
1304 ((struct nds32_reg *)reg_cache->reg_list[MR9].arch_info)->enable = true;
1305
1306 if (mr10_exist)
1307 ((struct nds32_reg *)reg_cache->reg_list[MR10].arch_info)->enable = true;
1308
1309 if (misc_config->edm) {
1310 int dr_reg_n = nds32->edm.breakpoint_num * 5;
1311
1312 for (int i = 0 ; i < dr_reg_n ; i++)
1313 ((struct nds32_reg *)reg_cache->reg_list[DR0 + i].arch_info)->enable = true;
1314
1315 ((struct nds32_reg *)reg_cache->reg_list[DR41].arch_info)->enable = true;
1316 ((struct nds32_reg *)reg_cache->reg_list[DR43].arch_info)->enable = true;
1317 ((struct nds32_reg *)reg_cache->reg_list[DR44].arch_info)->enable = true;
1318 ((struct nds32_reg *)reg_cache->reg_list[DR45].arch_info)->enable = true;
1319 }
1320
1321 if (misc_config->debug_tracer) {
1322 ((struct nds32_reg *)reg_cache->reg_list[DR46].arch_info)->enable = true;
1323 ((struct nds32_reg *)reg_cache->reg_list[DR47].arch_info)->enable = true;
1324 }
1325
1326 if (misc_config->performance_monitor) {
1327 ((struct nds32_reg *)reg_cache->reg_list[PFR0].arch_info)->enable = true;
1328 ((struct nds32_reg *)reg_cache->reg_list[PFR1].arch_info)->enable = true;
1329 ((struct nds32_reg *)reg_cache->reg_list[PFR2].arch_info)->enable = true;
1330 ((struct nds32_reg *)reg_cache->reg_list[PFR3].arch_info)->enable = true;
1331 }
1332
1333 if (misc_config->local_memory_dma) {
1334 ((struct nds32_reg *)reg_cache->reg_list[DMAR0].arch_info)->enable = true;
1335 ((struct nds32_reg *)reg_cache->reg_list[DMAR1].arch_info)->enable = true;
1336 ((struct nds32_reg *)reg_cache->reg_list[DMAR2].arch_info)->enable = true;
1337 ((struct nds32_reg *)reg_cache->reg_list[DMAR3].arch_info)->enable = true;
1338 ((struct nds32_reg *)reg_cache->reg_list[DMAR4].arch_info)->enable = true;
1339 ((struct nds32_reg *)reg_cache->reg_list[DMAR5].arch_info)->enable = true;
1340 ((struct nds32_reg *)reg_cache->reg_list[DMAR6].arch_info)->enable = true;
1341 ((struct nds32_reg *)reg_cache->reg_list[DMAR7].arch_info)->enable = true;
1342 ((struct nds32_reg *)reg_cache->reg_list[DMAR8].arch_info)->enable = true;
1343 ((struct nds32_reg *)reg_cache->reg_list[DMAR9].arch_info)->enable = true;
1344 ((struct nds32_reg *)reg_cache->reg_list[DMAR10].arch_info)->enable = true;
1345 }
1346
1347 if ((misc_config->local_memory_dma || misc_config->performance_monitor) &&
1348 (no_racr0 == false))
1349 ((struct nds32_reg *)reg_cache->reg_list[RACR].arch_info)->enable = true;
1350
1351 if (cpu_version->cop_fpu_extension || (misc_config->audio_isa != 0))
1352 ((struct nds32_reg *)reg_cache->reg_list[FUCPR].arch_info)->enable = true;
1353
1354 if (misc_config->audio_isa != 0) {
1355 if (misc_config->audio_isa > 1) {
1356 ((struct nds32_reg *)reg_cache->reg_list[D0L24].arch_info)->enable = true;
1357 ((struct nds32_reg *)reg_cache->reg_list[D1L24].arch_info)->enable = true;
1358 }
1359
1360 ((struct nds32_reg *)reg_cache->reg_list[I0].arch_info)->enable = true;
1361 ((struct nds32_reg *)reg_cache->reg_list[I1].arch_info)->enable = true;
1362 ((struct nds32_reg *)reg_cache->reg_list[I2].arch_info)->enable = true;
1363 ((struct nds32_reg *)reg_cache->reg_list[I3].arch_info)->enable = true;
1364 ((struct nds32_reg *)reg_cache->reg_list[I4].arch_info)->enable = true;
1365 ((struct nds32_reg *)reg_cache->reg_list[I5].arch_info)->enable = true;
1366 ((struct nds32_reg *)reg_cache->reg_list[I6].arch_info)->enable = true;
1367 ((struct nds32_reg *)reg_cache->reg_list[I7].arch_info)->enable = true;
1368 ((struct nds32_reg *)reg_cache->reg_list[M1].arch_info)->enable = true;
1369 ((struct nds32_reg *)reg_cache->reg_list[M2].arch_info)->enable = true;
1370 ((struct nds32_reg *)reg_cache->reg_list[M3].arch_info)->enable = true;
1371 ((struct nds32_reg *)reg_cache->reg_list[M5].arch_info)->enable = true;
1372 ((struct nds32_reg *)reg_cache->reg_list[M6].arch_info)->enable = true;
1373 ((struct nds32_reg *)reg_cache->reg_list[M7].arch_info)->enable = true;
1374 ((struct nds32_reg *)reg_cache->reg_list[MOD].arch_info)->enable = true;
1375 ((struct nds32_reg *)reg_cache->reg_list[LBE].arch_info)->enable = true;
1376 ((struct nds32_reg *)reg_cache->reg_list[LE].arch_info)->enable = true;
1377 ((struct nds32_reg *)reg_cache->reg_list[LC].arch_info)->enable = true;
1378 ((struct nds32_reg *)reg_cache->reg_list[ADM_VBASE].arch_info)->enable = true;
1379 ((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL0].arch_info)->enable = true;
1380 ((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL1].arch_info)->enable = true;
1381
1382 uint32_t value_mod;
1383 uint32_t fucpr_backup;
 1384                 /* temporarily enable the audio extension (FUCPR bit 31) so $mod can be read */
1385 nds32_get_mapped_reg(nds32, FUCPR, &fucpr_backup);
1386 if ((fucpr_backup & 0x80000000) == 0)
1387 nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup | 0x80000000);
1388 nds32_get_mapped_reg(nds32, MOD, &value_mod);
 1389                 /* restore the original FUCPR value */
1390 if ((fucpr_backup & 0x80000000) == 0)
1391 nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup);
1392
1393 if ((value_mod >> 6) & 0x1) {
1394 ((struct nds32_reg *)reg_cache->reg_list[CB_CTL].arch_info)->enable = true;
1395 ((struct nds32_reg *)reg_cache->reg_list[CBB0].arch_info)->enable = true;
1396 ((struct nds32_reg *)reg_cache->reg_list[CBB1].arch_info)->enable = true;
1397 ((struct nds32_reg *)reg_cache->reg_list[CBB2].arch_info)->enable = true;
1398 ((struct nds32_reg *)reg_cache->reg_list[CBB3].arch_info)->enable = true;
1399 ((struct nds32_reg *)reg_cache->reg_list[CBE0].arch_info)->enable = true;
1400 ((struct nds32_reg *)reg_cache->reg_list[CBE1].arch_info)->enable = true;
1401 ((struct nds32_reg *)reg_cache->reg_list[CBE2].arch_info)->enable = true;
1402 ((struct nds32_reg *)reg_cache->reg_list[CBE3].arch_info)->enable = true;
1403 }
1404 }
1405
1406 if ((cpu_version->cpu_id_family == 0x9) ||
1407 (cpu_version->cpu_id_family == 0xA) ||
1408 (cpu_version->cpu_id_family == 0xC)) {
1409
1410 ((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = true;
1411 ((struct nds32_reg *)reg_cache->reg_list[IDR1].arch_info)->enable = true;
1412
1413 if ((cpu_version->cpu_id_family == 0xC) && (cpu_version->revision == 0x0C))
1414 ((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = false;
1415 }
1416
1417 uint32_t ir3_value;
1418 uint32_t ivb_prog_pri_lvl;
1419 uint32_t ivb_ivic_ver;
1420
1421 nds32_get_mapped_reg(nds32, IR3, &ir3_value);
1422 ivb_prog_pri_lvl = ir3_value & 0x1;
1423 ivb_ivic_ver = (ir3_value >> 11) & 0x3;
1424
1425 if ((ivb_prog_pri_lvl == 1) || (ivb_ivic_ver >= 1)) {
1426 ((struct nds32_reg *)reg_cache->reg_list[IR18].arch_info)->enable = true;
1427 ((struct nds32_reg *)reg_cache->reg_list[IR19].arch_info)->enable = true;
1428 }
1429
1430 if (ivb_ivic_ver >= 1) {
1431 ((struct nds32_reg *)reg_cache->reg_list[IR26].arch_info)->enable = true;
1432 ((struct nds32_reg *)reg_cache->reg_list[IR27].arch_info)->enable = true;
1433 ((struct nds32_reg *)reg_cache->reg_list[IR28].arch_info)->enable = true;
1434 ((struct nds32_reg *)reg_cache->reg_list[IR29].arch_info)->enable = true;
1435 }
1436
1437 return ERROR_OK;
1438 }
1439
1440 int nds32_init_register_table(struct nds32 *nds32)
1441 {
1442 nds32_init_must_have_registers(nds32);
1443
1444 return ERROR_OK;
1445 }
1446
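/* Plant a software breakpoint: determine whether the instruction at the
 * address is 16-bit or 32-bit, save the original instruction, write the
 * matching BREAK opcode, sync the caches and verify by reading back. */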
1447 int nds32_add_software_breakpoint(struct target *target,
1448 struct breakpoint *breakpoint)
1449 {
1450 uint32_t data;
1451 uint32_t check_data;
1452 uint32_t break_insn;
1453
 1454         /* read the instruction to determine the breakpoint length */
1455 target->type->read_buffer(target, breakpoint->address, 4, (uint8_t *)&data);
1456
 1457         /* back up the original instruction; NDS32 instructions are stored big-endian,
 1458          * so the MSB of the first byte distinguishes 16-bit from 32-bit instructions */
 1459         if (*(char *)&data & 0x80) { /* 16-bit instruction */
1460 breakpoint->length = 2;
1461 break_insn = NDS32_BREAK_16;
 1462         } else { /* 32-bit instruction */
1463 breakpoint->length = 4;
1464 break_insn = NDS32_BREAK_32;
1465 }
1466
1467 if (breakpoint->orig_instr != NULL)
1468 free(breakpoint->orig_instr);
1469
1470 breakpoint->orig_instr = malloc(breakpoint->length);
1471 memcpy(breakpoint->orig_instr, &data, breakpoint->length);
1472
 1473         /* patch the target code in place with the BREAK instruction */
1474 target->type->write_buffer(target, breakpoint->address, breakpoint->length, (const uint8_t *)&break_insn);
1475 /* write_back & invalidate dcache & invalidate icache */
1476 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1477
1478 /* read back to check */
1479 target->type->read_buffer(target, breakpoint->address, breakpoint->length, (uint8_t *)&check_data);
1480 if (memcmp(&check_data, &break_insn, breakpoint->length) == 0)
1481 return ERROR_OK;
1482
1483 return ERROR_FAIL;
1484 }
1485
1486 int nds32_remove_software_breakpoint(struct target *target,
1487 struct breakpoint *breakpoint)
1488 {
1489 uint32_t check_data;
1490 uint32_t break_insn;
1491
1492 if (breakpoint->length == 2)
1493 break_insn = NDS32_BREAK_16;
1494 else if (breakpoint->length == 4)
1495 break_insn = NDS32_BREAK_32;
1496 else
1497 return ERROR_FAIL;
1498
1499 target->type->read_buffer(target, breakpoint->address, breakpoint->length,
1500 (uint8_t *)&check_data);
1501
 1502         /* refuse to restore if the break instruction has been overwritten */
1503 if (memcmp(&check_data, &break_insn, breakpoint->length) != 0)
1504 return ERROR_FAIL;
1505
 1506         /* restore the original instruction in place */
1507 target->type->write_buffer(target, breakpoint->address, breakpoint->length,
1508 breakpoint->orig_instr);
1509
1510 /* write_back & invalidate dcache & invalidate icache */
1511 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1512
1513 return ERROR_OK;
1514 }
1515
1516 /**
 1517  * Restore the processor context on an Andes target. The register cache is
 1518  * scanned for registers that are dirty on the host side and hold a valid
 1519  * new value; each such register is written back to the processor through
 1520  * AICE (using the 64-bit path for the FD0-FD31 registers) and then marked
 1521  * clean again. Dirty registers whose cached value is not valid are left
 1522  * untouched.
1523 *
1524 * @param target Pointer to the Andes target to have its context restored
 1525  * @return ERROR_OK, or an error status if the target is not halted.
1526 */
1527 int nds32_restore_context(struct target *target)
1528 {
1529 struct nds32 *nds32 = target_to_nds32(target);
1530 struct aice_port_s *aice = target_to_aice(target);
1531 struct reg_cache *reg_cache = nds32->core_cache;
1532 struct reg *reg;
1533 struct nds32_reg *reg_arch_info;
1534 unsigned int i;
1535
1536 LOG_DEBUG("-");
1537
1538 if (target->state != TARGET_HALTED) {
1539 LOG_WARNING("target not halted");
1540 return ERROR_TARGET_NOT_HALTED;
1541 }
1542
1543 /* check if there are dirty registers */
1544 for (i = 0; i < reg_cache->num_regs; i++) {
1545 reg = &(reg_cache->reg_list[i]);
1546 if (reg->dirty == true) {
1547 if (reg->valid == true) {
1548
1549 LOG_DEBUG("examining dirty reg: %s", reg->name);
1550 LOG_DEBUG("writing register %d with value 0x%8.8" PRIx32,
1551 i, buf_get_u32(reg->value, 0, 32));
1552
1553 reg_arch_info = reg->arch_info;
1554 if (FD0 <= reg_arch_info->num && reg_arch_info->num <= FD31) {
1555 uint64_t val = buf_get_u64(reg_arch_info->value, 0, 64);
1556 aice_write_reg_64(aice, reg_arch_info->num, val);
1557 } else {
1558 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
1559 aice_write_register(aice, reg_arch_info->num, val);
1560 }
1561
1562 reg->valid = true;
1563 reg->dirty = false;
1564 }
1565 }
1566 }
1567
1568 return ERROR_OK;
1569 }
1570
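/* Read EDM_CFG and EDM_CTL to discover the debug module version, the number
 * of hardware breakpoints and optional features (ACC_CTL, DALM, MAX_STOP),
 * then send the EDM passcode for secure MCUs. */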
1571 int nds32_edm_config(struct nds32 *nds32)
1572 {
1573 struct target *target = nds32->target;
1574 struct aice_port_s *aice = target_to_aice(target);
1575 uint32_t edm_cfg;
1576 uint32_t edm_ctl;
1577
1578 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CFG, &edm_cfg);
1579
1580 nds32->edm.version = (edm_cfg >> 16) & 0xFFFF;
1581 LOG_INFO("EDM version 0x%04x", nds32->edm.version);
1582
1583 nds32->edm.breakpoint_num = (edm_cfg & 0x7) + 1;
1584
1585 if ((nds32->edm.version & 0x1000) || (0x60 <= nds32->edm.version))
1586 nds32->edm.access_control = true;
1587 else
1588 nds32->edm.access_control = false;
1589
1590 if ((edm_cfg >> 4) & 0x1)
1591 nds32->edm.direct_access_local_memory = true;
1592 else
1593 nds32->edm.direct_access_local_memory = false;
1594
1595 if (nds32->edm.version <= 0x20)
1596 nds32->edm.direct_access_local_memory = false;
1597
1598 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &edm_ctl);
1599 if (edm_ctl & (0x1 << 29))
1600 nds32->edm.support_max_stop = true;
1601 else
1602 nds32->edm.support_max_stop = false;
1603
1604 /* set passcode for secure MCU */
1605 nds32_login(nds32);
1606
1607 return ERROR_OK;
1608 }
1609
1610 int nds32_config(struct nds32 *nds32)
1611 {
1612 nds32_init_config(nds32);
1613
1614 /* init optional system registers according to config registers */
1615 nds32_init_option_registers(nds32);
1616
1617 /* get max interrupt level */
1618 if (nds32->misc_config.interruption_level)
1619 nds32->max_interrupt_level = 2;
1620 else
1621 nds32->max_interrupt_level = 3;
1622
1623 /* get ILM/DLM size from MR6/MR7 */
1624 uint32_t value_mr6, value_mr7;
1625 uint32_t size_index;
1626 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
1627 size_index = (value_mr6 >> 1) & 0xF;
1628 nds32->memory.ilm_size = NDS32_LM_SIZE_TABLE[size_index];
1629
1630 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
1631 size_index = (value_mr7 >> 1) & 0xF;
1632 nds32->memory.dlm_size = NDS32_LM_SIZE_TABLE[size_index];
1633
1634 return ERROR_OK;
1635 }
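
/* Worked example for the MR6/MR7 decoding above (values are hypothetical):
 * with MR6 = 0x0000000B, size_index = (0xB >> 1) & 0xF = 0x5, so the ILM size
 * becomes NDS32_LM_SIZE_TABLE[5].  MR7 is decoded the same way for the DLM.
 */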
1636
1637 int nds32_init_arch_info(struct target *target, struct nds32 *nds32)
1638 {
1639 target->arch_info = nds32;
1640 nds32->target = target;
1641
1642 nds32->common_magic = NDS32_COMMON_MAGIC;
1643 nds32->init_arch_info_after_halted = false;
1644 nds32->auto_convert_hw_bp = true;
1645 nds32->global_stop = false;
1646 nds32->soft_reset_halt = false;
1647 nds32->edm_passcode = NULL;
1648 nds32->privilege_level = 0;
1649 nds32->boot_time = 1500;
1650 nds32->reset_halt_as_examine = false;
1651 nds32->keep_target_edm_ctl = false;
1652 nds32->word_access_mem = false;
1653 nds32->virtual_hosting = true;
1654 nds32->hit_syscall = false;
1655 nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;
1656 nds32->virtual_hosting_errno = 0;
1657 nds32->virtual_hosting_ctrl_c = false;
1658 nds32->attached = false;
1659
1660 nds32->syscall_break.asid = 0;
1661 nds32->syscall_break.length = 4;
1662 nds32->syscall_break.set = 0;
1663 nds32->syscall_break.orig_instr = NULL;
1664 nds32->syscall_break.next = NULL;
1665 nds32->syscall_break.unique_id = 0x515CAll + target->target_number;
1666 nds32->syscall_break.linked_BRP = 0;
1667
1668 nds32_reg_init();
1669
1670 if (ERROR_FAIL == nds32_reg_cache_init(target, nds32))
1671 return ERROR_FAIL;
1672
1673 if (ERROR_OK != nds32_init_register_table(nds32))
1674 return ERROR_FAIL;
1675
1676 return ERROR_OK;
1677 }
1678
1679 int nds32_virtual_to_physical(struct target *target, uint32_t address, uint32_t *physical)
1680 {
1681 struct nds32 *nds32 = target_to_nds32(target);
1682
1683 if (nds32->memory.address_translation == false) {
1684 *physical = address;
1685 return ERROR_OK;
1686 }
1687
1688 if (ERROR_OK == nds32_probe_tlb(nds32, address, physical))
1689 return ERROR_OK;
1690
1691 if (ERROR_OK == nds32_walk_page_table(nds32, address, physical))
1692 return ERROR_OK;
1693
1694 return ERROR_FAIL;
1695 }
1696
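/**
 * Make an address range coherent after the debugger has patched memory:
 * every D-cache line covering [address, address + length) is written back
 * and invalidated, then the corresponding I-cache lines are invalidated.
 * I-cache invalidation is issued with physical addresses because PSW.IT is
 * off while the core sits in a debug exception (see the comment below).
 */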
1697 int nds32_cache_sync(struct target *target, uint32_t address, uint32_t length)
1698 {
1699 struct aice_port_s *aice = target_to_aice(target);
1700 struct nds32 *nds32 = target_to_nds32(target);
1701 struct nds32_cache *dcache = &(nds32->memory.dcache);
1702 struct nds32_cache *icache = &(nds32->memory.icache);
1703 uint32_t dcache_line_size = NDS32_LINE_SIZE_TABLE[dcache->line_size];
1704 uint32_t icache_line_size = NDS32_LINE_SIZE_TABLE[icache->line_size];
1705 uint32_t cur_address;
1706 int result;
1707 uint32_t start_line, end_line;
1708 uint32_t cur_line;
1709
1710 if ((dcache->line_size != 0) && (dcache->enable == true)) {
1711 /* address / dcache_line_size */
1712 start_line = address >> (dcache->line_size + 2);
1713 /* (address + length - 1) / dcache_line_size */
1714 end_line = (address + length - 1) >> (dcache->line_size + 2);
1715
1716 for (cur_address = address, cur_line = start_line ;
1717 cur_line <= end_line ;
1718 cur_address += dcache_line_size, cur_line++) {
1719 /* D$ write back */
1720 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_WB, cur_address);
1721 if (result != ERROR_OK)
1722 return result;
1723
1724 /* D$ invalidate */
1725 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_INVAL, cur_address);
1726 if (result != ERROR_OK)
1727 return result;
1728 }
1729 }
1730
1731 if ((icache->line_size != 0) && (icache->enable == true)) {
1732 /* address / icache_line_size */
1733 start_line = address >> (icache->line_size + 2);
1734 /* (address + length - 1) / icache_line_size */
1735 end_line = (address + length - 1) >> (icache->line_size + 2);
1736
1737 for (cur_address = address, cur_line = start_line ;
1738 cur_line <= end_line ;
1739 cur_address += icache_line_size, cur_line++) {
1740 /* Because PSW.IT is turned off under a debug exception, the address MUST
1741 * be a physical address. L1I_VA_INVALIDATE uses PSW.IT to decide whether
1742 * to translate the address or not. */
1743 uint32_t physical_addr;
1744 if (ERROR_FAIL == target->type->virt2phys(target, cur_address,
1745 &physical_addr))
1746 return ERROR_FAIL;
1747
1748 /* I$ invalidate */
1749 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1I_VA_INVAL, physical_addr);
1750 if (result != ERROR_OK)
1751 return result;
1752 }
1753 }
1754
1755 return ERROR_OK;
1756 }
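
/* Worked example for the line arithmetic above (hypothetical values): with a
 * dcache line_size code of 2 the shift is line_size + 2 = 4, i.e. 16-byte
 * lines.  For address = 0x1003 and length = 0x20:
 *     start_line = 0x1003 >> 4          = 0x100
 *     end_line   = (0x1003 + 0x1F) >> 4 = 0x102
 * so three lines (0x100..0x102) are written back and invalidated.
 */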
1757
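/**
 * Compute the PC that execution will resume from.  If @a current is false,
 * @a address is written to PC; otherwise the current PC is read back into
 * @a address.  Either way the resulting PC value is returned.
 */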
1758 uint32_t nds32_nextpc(struct nds32 *nds32, int current, uint32_t address)
1759 {
1760 if (!current)
1761 nds32_set_mapped_reg(nds32, PC, address);
1762 else
1763 nds32_get_mapped_reg(nds32, PC, &address);
1764
1765 return address;
1766 }
1767
1768 int nds32_step(struct target *target, int current,
1769 uint32_t address, int handle_breakpoints)
1770 {
1771 LOG_DEBUG("target->state: %s",
1772 target_state_name(target));
1773
1774 if (target->state != TARGET_HALTED) {
1775 LOG_WARNING("target was not halted");
1776 return ERROR_TARGET_NOT_HALTED;
1777 }
1778
1779 struct nds32 *nds32 = target_to_nds32(target);
1780
1781 address = nds32_nextpc(nds32, current, address);
1782
1783 LOG_DEBUG("STEP PC %08" PRIx32 "%s", address, !current ? "!" : "");
1784
1785 /** set DSSIM */
1786 uint32_t ir14_value;
1787 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1788 if (nds32->step_isr_enable)
1789 ir14_value |= (0x1 << 31);
1790 else
1791 ir14_value &= ~(0x1 << 31);
1792 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1793
1794 /* check hit_syscall before leave_debug_state() because
1795 * leave_debug_state() may clear hit_syscall flag */
1796 bool no_step = false;
1797 if (nds32->hit_syscall)
1798 /* a step right after hit_syscall should be ignored, because
1799 * leave_debug_state() already steps implicitly to skip over
1800 * the syscall */
1801 no_step = true;
1802
1803 /********* TODO: maybe create another function to handle this part */
1804 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
1805 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
1806
1807 if (no_step == false) {
1808 struct aice_port_s *aice = target_to_aice(target);
1809 if (ERROR_OK != aice_step(aice))
1810 return ERROR_FAIL;
1811 }
1812
1813 /* save state */
1814 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
1815 /********* TODO: maybe create another function to handle this part */
1816
1817 /* restore DSSIM */
1818 if (nds32->step_isr_enable) {
1819 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1820 ir14_value &= ~(0x1 << 31);
1821 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1822 }
1823
1824 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
1825
1826 return ERROR_OK;
1827 }
1828
1829 static int nds32_step_without_watchpoint(struct nds32 *nds32)
1830 {
1831 struct target *target = nds32->target;
1832
1833 if (target->state != TARGET_HALTED) {
1834 LOG_WARNING("target was not halted");
1835 return ERROR_TARGET_NOT_HALTED;
1836 }
1837
1838 /** set DSSIM */
1839 uint32_t ir14_value;
1840 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1841 if (nds32->step_isr_enable)
1842 ir14_value |= (0x1 << 31);
1843 else
1844 ir14_value &= ~(0x1 << 31);
1845 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1846
1847 /********* TODO: maybe create another function to handle this part */
1848 CHECK_RETVAL(nds32->leave_debug_state(nds32, false));
1849
1850 struct aice_port_s *aice = target_to_aice(target);
1851
1852 if (ERROR_OK != aice_step(aice))
1853 return ERROR_FAIL;
1854
1855 /* save state */
1856 CHECK_RETVAL(nds32->enter_debug_state(nds32, false));
1857 /********* TODO: maybe create another function to handle this part */
1858
1859 /* restore DSSIM */
1860 if (nds32->step_isr_enable) {
1861 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1862 ir14_value &= ~(0x1 << 31);
1863 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1864 }
1865
1866 return ERROR_OK;
1867 }
1868
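/**
 * Translate the state reported by the AICE adapter into an OpenOCD
 * target_state.  A disconnected USB adapter or a detached target is
 * reported as a failure rather than as a state.
 */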
1869 int nds32_target_state(struct nds32 *nds32, enum target_state *state)
1870 {
1871 struct aice_port_s *aice = target_to_aice(nds32->target);
1872 enum aice_target_state_s nds32_state;
1873
1874 if (aice_state(aice, &nds32_state) != ERROR_OK)
1875 return ERROR_FAIL;
1876
1877 switch (nds32_state) {
1878 case AICE_DISCONNECT:
1879 LOG_INFO("USB is disconnected");
1880 return ERROR_FAIL;
1881 case AICE_TARGET_DETACH:
1882 LOG_INFO("Target is disconnected");
1883 return ERROR_FAIL;
1884 case AICE_TARGET_UNKNOWN:
1885 *state = TARGET_UNKNOWN;
1886 break;
1887 case AICE_TARGET_RUNNING:
1888 *state = TARGET_RUNNING;
1889 break;
1890 case AICE_TARGET_HALTED:
1891 *state = TARGET_HALTED;
1892 break;
1893 case AICE_TARGET_RESET:
1894 *state = TARGET_RESET;
1895 break;
1896 case AICE_TARGET_DEBUG_RUNNING:
1897 *state = TARGET_DEBUG_RUNNING;
1898 break;
1899 default:
1900 return ERROR_FAIL;
1901 }
1902
1903 return ERROR_OK;
1904 }
1905
1906 int nds32_examine_debug_reason(struct nds32 *nds32)
1907 {
1908 uint32_t reason;
1909 struct target *target = nds32->target;
1910
1911 if (nds32->hit_syscall == true) {
1912 LOG_DEBUG("Hit syscall breakpoint");
1913 target->debug_reason = DBG_REASON_BREAKPOINT;
1914 return ERROR_OK;
1915 }
1916
1917 nds32->get_debug_reason(nds32, &reason);
1918
1919 LOG_DEBUG("nds32 examines debug reason: %s", nds32_debug_type_name[reason]);
1920
1921 /* Examine debug reason */
1922 switch (reason) {
1923 case NDS32_DEBUG_BREAK:
1924 case NDS32_DEBUG_BREAK_16:
1925 case NDS32_DEBUG_INST_BREAK:
1926 {
1927 uint32_t value_pc;
1928 uint32_t opcode;
1929 struct nds32_instruction instruction;
1930
1931 nds32_get_mapped_reg(nds32, PC, &value_pc);
1932
1933 if (ERROR_OK != nds32_read_opcode(nds32, value_pc, &opcode))
1934 return ERROR_FAIL;
1935 if (ERROR_OK != nds32_evaluate_opcode(nds32, opcode, value_pc,
1936 &instruction))
1937 return ERROR_FAIL;
1938
1939 /* hit 'break 0x7FFF' */
1940 if ((instruction.info.opc_6 == 0x32) &&
1941 (instruction.info.sub_opc == 0xA) &&
1942 (instruction.info.imm == 0x7FFF)) {
1943 target->debug_reason = DBG_REASON_EXIT;
1944 } else
1945 target->debug_reason = DBG_REASON_BREAKPOINT;
1946 }
1947 break;
1948 case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_PRECISE:
1949 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_PRECISE:
1950 case NDS32_DEBUG_LOAD_STORE_GLOBAL_STOP: /* GLOBAL_STOP is precise exception */
1951 {
1952 int result;
1953
1954 result = nds32->get_watched_address(nds32,
1955 &(nds32->watched_address), reason);
1956 /* the watched (exception) address was saved above, before single-stepping */
1957 /* do a single step (without watchpoints) to skip the "watched" instruction */
1958 nds32_step_without_watchpoint(nds32);
1959
1960 if (ERROR_OK != result)
1961 return ERROR_FAIL;
1962
1963 target->debug_reason = DBG_REASON_WATCHPOINT;
1964 }
1965 break;
1966 case NDS32_DEBUG_DEBUG_INTERRUPT:
1967 target->debug_reason = DBG_REASON_DBGRQ;
1968 break;
1969 case NDS32_DEBUG_HARDWARE_SINGLE_STEP:
1970 target->debug_reason = DBG_REASON_SINGLESTEP;
1971 break;
1972 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_IMPRECISE:
1973 case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_NEXT_PRECISE:
1974 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_NEXT_PRECISE:
1975 if (ERROR_OK != nds32->get_watched_address(nds32,
1976 &(nds32->watched_address), reason))
1977 return ERROR_FAIL;
1978
1979 target->debug_reason = DBG_REASON_WATCHPOINT;
1980 break;
1981 default:
1982 target->debug_reason = DBG_REASON_UNDEFINED;
1983 break;
1984 }
1985
1986 return ERROR_OK;
1987 }
1988
1989 int nds32_login(struct nds32 *nds32)
1990 {
1991 struct target *target = nds32->target;
1992 struct aice_port_s *aice = target_to_aice(target);
1993 uint32_t passcode_length;
1994 char command_sequence[129];
1995 char command_str[33];
1996 char code_str[9];
1997 uint32_t copy_length;
1998 uint32_t code;
1999 uint32_t i;
2000
2001 LOG_DEBUG("nds32_login");
2002
2003 if (nds32->edm_passcode != NULL) {
2004 /* convert EDM passcode to command sequences */
2005 passcode_length = strlen(nds32->edm_passcode);
2006 command_sequence[0] = '\0';
2007 for (i = 0; i < passcode_length; i += 8) {
2008 if (passcode_length - i < 8)
2009 copy_length = passcode_length - i;
2010 else
2011 copy_length = 8;
2012
2013 strncpy(code_str, nds32->edm_passcode + i, copy_length);
2014 code_str[copy_length] = '\0';
2015 code = strtoul(code_str, NULL, 16);
2016
2017 sprintf(command_str, "write_misc gen_port0 0x%" PRIx32 ";", code);
2018 strcat(command_sequence, command_str);
2019 }
2020
2021 if (ERROR_OK != aice_program_edm(aice, command_sequence))
2022 return ERROR_FAIL;
2023
2024 /* get current privilege level */
2025 uint32_t value_edmsw;
2026 aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &value_edmsw);
2027 nds32->privilege_level = (value_edmsw >> 16) & 0x3;
2028 LOG_INFO("Current privilege level: %d", nds32->privilege_level);
2029 }
2030
2031 if (nds32_edm_ops_num > 0) {
2032 const char *reg_name;
2033 for (i = 0 ; i < nds32_edm_ops_num ; i++) {
2034 code = nds32_edm_ops[i].value;
2035 if (nds32_edm_ops[i].reg_no == 6)
2036 reg_name = "gen_port0";
2037 else if (nds32_edm_ops[i].reg_no == 7)
2038 reg_name = "gen_port1";
2039 else
2040 return ERROR_FAIL;
2041
2042 sprintf(command_str, "write_misc %s 0x%" PRIx32 ";", reg_name, code);
2043 if (ERROR_OK != aice_program_edm(aice, command_str))
2044 return ERROR_FAIL;
2045 }
2046 }
2047
2048 return ERROR_OK;
2049 }
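
/* Worked example for the passcode conversion in nds32_login() above (the
 * passcode value is hypothetical): an edm_passcode of "89ABCDEF01" is split
 * into 8-digit chunks, producing the command sequence
 *     "write_misc gen_port0 0x89abcdef;write_misc gen_port0 0x1;"
 * which is then downloaded with aice_program_edm().
 */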
2050
2051 int nds32_halt(struct target *target)
2052 {
2053 struct nds32 *nds32 = target_to_nds32(target);
2054 struct aice_port_s *aice = target_to_aice(target);
2055 enum target_state state;
2056
2057 LOG_DEBUG("target->state: %s",
2058 target_state_name(target));
2059
2060 if (target->state == TARGET_HALTED) {
2061 LOG_DEBUG("target was already halted");
2062 return ERROR_OK;
2063 }
2064
2065 if (nds32_target_state(nds32, &state) != ERROR_OK)
2066 return ERROR_FAIL;
2067
2068 if (TARGET_HALTED != state)
2069 /* TODO: if state == TARGET_HALTED, check ETYPE is DBGI or not */
2070 if (ERROR_OK != aice_halt(aice))
2071 return ERROR_FAIL;
2072
2073 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
2074
2075 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
2076
2077 return ERROR_OK;
2078 }
2079
2080 /* poll current target status */
2081 int nds32_poll(struct target *target)
2082 {
2083 struct nds32 *nds32 = target_to_nds32(target);
2084 enum target_state state;
2085
2086 if (nds32_target_state(nds32, &state) != ERROR_OK)
2087 return ERROR_FAIL;
2088
2089 if (state == TARGET_HALTED) {
2090 if (target->state != TARGET_HALTED) {
2091 /* if this was a false hit, let the core continue free-running */
2092 if (ERROR_OK != nds32->enter_debug_state(nds32, true)) {
2093 struct aice_port_s *aice = target_to_aice(target);
2094 aice_run(aice);
2095 return ERROR_OK;
2096 }
2097
2098 LOG_DEBUG("Change target state to TARGET_HALTED.");
2099
2100 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
2101 }
2102 } else if (state == TARGET_RESET) {
2103 if (target->state == TARGET_HALTED) {
2104 /* similar to assert srst */
2105 register_cache_invalidate(nds32->core_cache);
2106 target->state = TARGET_RESET;
2107
2108 /* TODO: deassert srst */
2109 } else if (target->state == TARGET_RUNNING) {
2110 /* reset as running */
2111 LOG_WARNING("<-- TARGET WARNING! The debug target has been reset. -->");
2112 }
2113 } else {
2114 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
2115 LOG_DEBUG("Change target state to TARGET_RUNNING.");
2116 target->state = TARGET_RUNNING;
2117 target->debug_reason = DBG_REASON_NOTHALTED;
2118 }
2119 }
2120
2121 return ERROR_OK;
2122 }
2123
2124 int nds32_resume(struct target *target, int current,
2125 uint32_t address, int handle_breakpoints, int debug_execution)
2126 {
2127 LOG_DEBUG("current %d address %08" PRIx32
2128 " handle_breakpoints %d"
2129 " debug_execution %d",
2130 current, address, handle_breakpoints, debug_execution);
2131
2132 struct nds32 *nds32 = target_to_nds32(target);
2133
2134 if (target->state != TARGET_HALTED) {
2135 LOG_ERROR("Target not halted");
2136 return ERROR_TARGET_NOT_HALTED;
2137 }
2138
2139 address = nds32_nextpc(nds32, current, address);
2140
2141 LOG_DEBUG("RESUME PC %08" PRIx32 "%s", address, !current ? "!" : "");
2142
2143 if (!debug_execution)
2144 target_free_all_working_areas(target);
2145
2146 /* Disable HSS to prevent users from misusing hardware single-step */
2147 if (nds32_reach_max_interrupt_level(nds32) == false) {
2148 uint32_t value_ir0;
2149 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
2150 value_ir0 &= ~(0x1 << 11);
2151 nds32_set_mapped_reg(nds32, IR0, value_ir0);
2152 }
2153
2154 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
2155 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
2156
2157 if (nds32->virtual_hosting_ctrl_c == false) {
2158 struct aice_port_s *aice = target_to_aice(target);
2159 aice_run(aice);
2160 } else
2161 nds32->virtual_hosting_ctrl_c = false;
2162
2163 target->debug_reason = DBG_REASON_NOTHALTED;
2164 if (!debug_execution)
2165 target->state = TARGET_RUNNING;
2166 else
2167 target->state = TARGET_DEBUG_RUNNING;
2168
2169 LOG_DEBUG("target->state: %s",
2170 target_state_name(target));
2171
2172 return ERROR_OK;
2173 }
2174
2175 static int nds32_soft_reset_halt(struct target *target)
2176 {
2177 /* TODO: test it */
2178 struct nds32 *nds32 = target_to_nds32(target);
2179 struct aice_port_s *aice = target_to_aice(target);
2180
2181 aice_assert_srst(aice, AICE_SRST);
2182
2183 /* halt core and set pc to 0x0 */
2184 int retval = target_halt(target);
2185 if (retval != ERROR_OK)
2186 return retval;
2187
2188 /* start fetching from IVB */
2189 uint32_t value_ir3;
2190 nds32_get_mapped_reg(nds32, IR3, &value_ir3);
2191 nds32_set_mapped_reg(nds32, PC, value_ir3 & 0xFFFF0000);
2192
2193 return ERROR_OK;
2194 }
2195
2196 int nds32_assert_reset(struct target *target)
2197 {
2198 struct nds32 *nds32 = target_to_nds32(target);
2199 struct aice_port_s *aice = target_to_aice(target);
2200 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
2201
2202 if (target->reset_halt) {
2203 if ((nds32->soft_reset_halt)
2204 || (nds32->edm.version < 0x51)
2205 || ((nds32->edm.version == 0x51)
2206 && (cpu_version->revision == 0x1C)
2207 && (cpu_version->cpu_id_family == 0xC)
2208 && (cpu_version->cpu_id_version == 0x0)))
2209 nds32_soft_reset_halt(target);
2210 else
2211 aice_assert_srst(aice, AICE_RESET_HOLD);
2212 } else {
2213 aice_assert_srst(aice, AICE_SRST);
2214 alive_sleep(nds32->boot_time);
2215 }
2216
2217 /* set passcode for secure MCU after core reset */
2218 nds32_login(nds32);
2219
2220 /* registers are now invalid */
2221 register_cache_invalidate(nds32->core_cache);
2222
2223 target->state = TARGET_RESET;
2224
2225 return ERROR_OK;
2226 }
2227
2228 static int nds32_gdb_attach(struct nds32 *nds32)
2229 {
2230 LOG_DEBUG("nds32_gdb_attach, target coreid: %" PRId32, nds32->target->coreid);
2231
2232 if (nds32->attached == false) {
2233
2234 if (nds32->keep_target_edm_ctl) {
2235 /* backup target EDM_CTL */
2236 struct aice_port_s *aice = target_to_aice(nds32->target);
2237 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &nds32->backup_edm_ctl);
2238 }
2239
2240 target_halt(nds32->target);
2241
2242 nds32->attached = true;
2243 }
2244
2245 return ERROR_OK;
2246 }
2247
2248 static int nds32_gdb_detach(struct nds32 *nds32)
2249 {
2250 LOG_DEBUG("nds32_gdb_detach");
2251 bool backup_virtual_hosting_setting;
2252
2253 if (nds32->attached) {
2254
2255 backup_virtual_hosting_setting = nds32->virtual_hosting;
2256 /* turn off virtual hosting before resume as gdb-detach */
2257 nds32->virtual_hosting = false;
2258 target_resume(nds32->target, 1, 0, 0, 0);
2259 nds32->virtual_hosting = backup_virtual_hosting_setting;
2260
2261 if (nds32->keep_target_edm_ctl) {
2262 /* restore target EDM_CTL */
2263 struct aice_port_s *aice = target_to_aice(nds32->target);
2264 aice_write_debug_reg(aice, NDS_EDM_SR_EDM_CTL, nds32->backup_edm_ctl);
2265 }
2266
2267 nds32->attached = false;
2268 }
2269
2270 return ERROR_OK;
2271 }
2272
2273 static int nds32_callback_event_handler(struct target *target,
2274 enum target_event event, void *priv)
2275 {
2276 int retval = ERROR_OK;
2277 int target_number = *(int *)priv;
2278
2279 if (target_number != target->target_number)
2280 return ERROR_OK;
2281
2282 struct nds32 *nds32 = target_to_nds32(target);
2283
2284 switch (event) {
2285 case TARGET_EVENT_GDB_ATTACH:
2286 retval = nds32_gdb_attach(nds32);
2287 break;
2288 case TARGET_EVENT_GDB_DETACH:
2289 retval = nds32_gdb_detach(nds32);
2290 break;
2291 default:
2292 break;
2293 }
2294
2295 return retval;
2296 }
2297
2298 int nds32_init(struct nds32 *nds32)
2299 {
2300 /* Initialize anything we can set up without talking to the target */
2301 nds32->memory.access_channel = NDS_MEMORY_ACC_CPU;
2302
2303 /* register event callback */
2304 target_register_event_callback(nds32_callback_event_handler,
2305 &(nds32->target->target_number));
2306
2307 return ERROR_OK;
2308 }
2309
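/**
 * Fill @a fileio_info from the syscall the target has just hit: the syscall
 * ID is taken from IR6 bits [30:16], the identifier string is set accordingly,
 * and the parameters are fetched from R0-R2.  For calls that pass a path or a
 * command string, the string is read back from target memory so its length
 * (including the terminating NUL) can be reported as well.
 */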
2310 int nds32_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
2311 {
2312 /* fill syscall parameters into the file-I/O info */
2313 if (NULL == fileio_info) {
2314 LOG_ERROR("Target has no initialized file-I/O data structure");
2315 return ERROR_FAIL;
2316 }
2317
2318 struct nds32 *nds32 = target_to_nds32(target);
2319 uint32_t value_ir6;
2320 uint32_t syscall_id;
2321
2322 if (nds32->hit_syscall == false)
2323 return ERROR_FAIL;
2324
2325 nds32_get_mapped_reg(nds32, IR6, &value_ir6);
2326 syscall_id = (value_ir6 >> 16) & 0x7FFF;
2327 nds32->active_syscall_id = syscall_id;
2328
2329 LOG_DEBUG("hit syscall ID: 0x%" PRIx32, syscall_id);
2330
2331 /* free previous identifier storage */
2332 if (NULL != fileio_info->identifier) {
2333 free(fileio_info->identifier);
2334 fileio_info->identifier = NULL;
2335 }
2336
2337 switch (syscall_id) {
2338 case NDS32_SYSCALL_EXIT:
2339 fileio_info->identifier = malloc(5);
2340 sprintf(fileio_info->identifier, "exit");
2341 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2342 break;
2343 case NDS32_SYSCALL_OPEN:
2344 {
2345 uint8_t filename[256];
2346 fileio_info->identifier = malloc(5);
2347 sprintf(fileio_info->identifier, "open");
2348 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2349 /* reserve fileio_info->param_2 for length of path */
2350 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_3));
2351 nds32_get_mapped_reg(nds32, R2, &(fileio_info->param_4));
2352
2353 target->type->read_buffer(target, fileio_info->param_1,
2354 256, filename);
2355 fileio_info->param_2 = strlen((char *)filename) + 1;
2356 }
2357 break;
2358 case NDS32_SYSCALL_CLOSE:
2359 fileio_info->identifier = malloc(6);
2360 sprintf(fileio_info->identifier, "close");
2361 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2362 break;
2363 case NDS32_SYSCALL_READ:
2364 fileio_info->identifier = malloc(5);
2365 sprintf(fileio_info->identifier, "read");
2366 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2367 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2368 nds32_get_mapped_reg(nds32, R2, &(fileio_info->param_3));
2369 break;
2370 case NDS32_SYSCALL_WRITE:
2371 fileio_info->identifier = malloc(6);
2372 sprintf(fileio_info->identifier, "write");
2373 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2374 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2375 nds32_get_mapped_reg(nds32, R2, &(fileio_info->param_3));
2376 break;
2377 case NDS32_SYSCALL_LSEEK:
2378 fileio_info->identifier = malloc(6);
2379 sprintf(fileio_info->identifier, "lseek");
2380 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2381 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2382 nds32_get_mapped_reg(nds32, R2, &(fileio_info->param_3));
2383 break;
2384 case NDS32_SYSCALL_UNLINK:
2385 {
2386 uint8_t filename[256];
2387 fileio_info->identifier = malloc(7);
2388 sprintf(fileio_info->identifier, "unlink");
2389 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2390 /* reserve fileio_info->param_2 for length of path */
2391
2392 target->type->read_buffer(target, fileio_info->param_1,
2393 256, filename);
2394 fileio_info->param_2 = strlen((char *)filename) + 1;
2395 }
2396 break;
2397 case NDS32_SYSCALL_RENAME:
2398 {
2399 uint8_t filename[256];
2400 fileio_info->identifier = malloc(7);
2401 sprintf(fileio_info->identifier, "rename");
2402 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2403 /* reserve fileio_info->param_2 for length of old path */
2404 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_3));
2405 /* reserve fileio_info->param_4 for length of new path */
2406
2407 target->type->read_buffer(target, fileio_info->param_1,
2408 256, filename);
2409 fileio_info->param_2 = strlen((char *)filename) + 1;
2410
2411 target->type->read_buffer(target, fileio_info->param_3,
2412 256, filename);
2413 fileio_info->param_4 = strlen((char *)filename) + 1;
2414 }
2415 break;
2416 case NDS32_SYSCALL_FSTAT:
2417 fileio_info->identifier = malloc(6);
2418 sprintf(fileio_info->identifier, "fstat");
2419 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2420 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2421 break;
2422 case NDS32_SYSCALL_STAT:
2423 {
2424 uint8_t filename[256];
2425 fileio_info->identifier = malloc(5);
2426 sprintf(fileio_info->identifier, "stat");
2427 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2428 /* reserve fileio_info->param_2 for length of old path */
2429 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_3));
2430
2431 target->type->read_buffer(target, fileio_info->param_1,
2432 256, filename);
2433 fileio_info->param_2 = strlen((char *)filename) + 1;
2434 }
2435 break;
2436 case NDS32_SYSCALL_GETTIMEOFDAY:
2437 fileio_info->identifier = malloc(13);
2438 sprintf(fileio_info->identifier, "gettimeofday");
2439 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2440 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2441 break;
2442 case NDS32_SYSCALL_ISATTY:
2443 fileio_info->identifier = malloc(7);
2444 sprintf(fileio_info->identifier, "isatty");
2445 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2446 break;
2447 case NDS32_SYSCALL_SYSTEM:
2448 {
2449 uint8_t command[256];
2450 fileio_info->identifier = malloc(7);
2451 sprintf(fileio_info->identifier, "system");
2452 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2453 /* reserve fileio_info->param_2 for length of old path */
2454
2455 target->type->read_buffer(target, fileio_info->param_1,
2456 256, command);
2457 fileio_info->param_2 = strlen((char *)command) + 1;
2458 }
2459 break;
2460 case NDS32_SYSCALL_ERRNO:
2461 fileio_info->identifier = malloc(6);
2462 sprintf(fileio_info->identifier, "errno");
2463 nds32_set_mapped_reg(nds32, R0, nds32->virtual_hosting_errno);
2464 break;
2465 default:
2466 fileio_info->identifier = malloc(8);
2467 sprintf(fileio_info->identifier, "unknown");
2468 break;
2469 }
2470
2471 return ERROR_OK;
2472 }
2473
2474 int nds32_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
2475 {
2476 LOG_DEBUG("syscall return code: 0x%x, errno: 0x%x , ctrl_c: %s",
2477 retcode, fileio_errno, ctrl_c ? "true" : "false");
2478
2479 struct nds32 *nds32 = target_to_nds32(target);
2480
2481 nds32_set_mapped_reg(nds32, R0, (uint32_t)retcode);
2482
2483 nds32->virtual_hosting_errno = fileio_errno;
2484 nds32->virtual_hosting_ctrl_c = ctrl_c;
2485 nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;
2486
2487 return ERROR_OK;
2488 }
2489
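/**
 * Sample the PC for a simple profiling run: the AICE adapter is asked to
 * sample the mapped PC register every 10 ms, for at most seconds * 100
 * samples (clamped to @a max_num_samples).  The register cache is
 * invalidated afterwards.
 */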
2490 int nds32_profiling(struct target *target, uint32_t *samples,
2491 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2492 {
2493 /* sample $PC every 10 milliseconds */
2494 uint32_t iteration = seconds * 100;
2495 struct aice_port_s *aice = target_to_aice(target);
2496 struct nds32 *nds32 = target_to_nds32(target);
2497
2498 if (max_num_samples < iteration)
2499 iteration = max_num_samples;
2500
2501 int pc_regnum = nds32->register_map(nds32, PC);
2502 aice_profiling(aice, 10, iteration, pc_regnum, samples, num_samples);
2503
2504 register_cache_invalidate(nds32->core_cache);
2505
2506 return ERROR_OK;
2507 }
2508
2509 int nds32_gdb_fileio_write_memory(struct nds32 *nds32, uint32_t address,
2510 uint32_t size, const uint8_t *buffer)
2511 {
2512 if ((NDS32_SYSCALL_FSTAT == nds32->active_syscall_id) ||
2513 (NDS32_SYSCALL_STAT == nds32->active_syscall_id)) {
2514 /* If doing GDB file-I/O, target should convert 'struct stat'
2515 * from gdb-format to target-format */
2516 uint8_t stat_buffer[NDS32_STRUCT_STAT_SIZE];
2517 /* st_dev 2 */
2518 stat_buffer[0] = buffer[3];
2519 stat_buffer[1] = buffer[2];
2520 /* st_ino 2 */
2521 stat_buffer[2] = buffer[7];
2522 stat_buffer[3] = buffer[6];
2523 /* st_mode 4 */
2524 stat_buffer[4] = buffer[11];
2525 stat_buffer[5] = buffer[10];
2526 stat_buffer[6] = buffer[9];
2527 stat_buffer[7] = buffer[8];
2528 /* st_nlink 2 */
2529 stat_buffer[8] = buffer[15];
2530 stat_buffer[9] = buffer[14];
2531 /* st_uid 2 */
2532 stat_buffer[10] = buffer[19];
2533 stat_buffer[11] = buffer[18];
2534 /* st_gid 2 */
2535 stat_buffer[12] = buffer[23];
2536 stat_buffer[13] = buffer[22];
2537 /* st_rdev 2 */
2538 stat_buffer[14] = buffer[27];
2539 stat_buffer[15] = buffer[26];
2540 /* st_size 4 */
2541 stat_buffer[16] = buffer[35];
2542 stat_buffer[17] = buffer[34];
2543 stat_buffer[18] = buffer[33];
2544 stat_buffer[19] = buffer[32];
2545 /* st_atime 4 */
2546 stat_buffer[20] = buffer[55];
2547 stat_buffer[21] = buffer[54];
2548 stat_buffer[22] = buffer[53];
2549 stat_buffer[23] = buffer[52];
2550 /* st_spare1 4 */
2551 stat_buffer[24] = 0;
2552 stat_buffer[25] = 0;
2553 stat_buffer[26] = 0;
2554 stat_buffer[27] = 0;
2555 /* st_mtime 4 */
2556 stat_buffer[28] = buffer[59];
2557 stat_buffer[29] = buffer[58];
2558 stat_buffer[30] = buffer[57];
2559 stat_buffer[31] = buffer[56];
2560 /* st_spare2 4 */
2561 stat_buffer[32] = 0;
2562 stat_buffer[33] = 0;
2563 stat_buffer[34] = 0;
2564 stat_buffer[35] = 0;
2565 /* st_ctime 4 */
2566 stat_buffer[36] = buffer[63];
2567 stat_buffer[37] = buffer[62];
2568 stat_buffer[38] = buffer[61];
2569 stat_buffer[39] = buffer[60];
2570 /* st_spare3 4 */
2571 stat_buffer[40] = 0;
2572 stat_buffer[41] = 0;
2573 stat_buffer[42] = 0;
2574 stat_buffer[43] = 0;
2575 /* st_blksize 4 */
2576 stat_buffer[44] = buffer[43];
2577 stat_buffer[45] = buffer[42];
2578 stat_buffer[46] = buffer[41];
2579 stat_buffer[47] = buffer[40];
2580 /* st_blocks 4 */
2581 stat_buffer[48] = buffer[51];
2582 stat_buffer[49] = buffer[50];
2583 stat_buffer[50] = buffer[49];
2584 stat_buffer[51] = buffer[48];
2585 /* st_spare4 8 */
2586 stat_buffer[52] = 0;
2587 stat_buffer[53] = 0;
2588 stat_buffer[54] = 0;
2589 stat_buffer[55] = 0;
2590 stat_buffer[56] = 0;
2591 stat_buffer[57] = 0;
2592 stat_buffer[58] = 0;
2593 stat_buffer[59] = 0;
2594
2595 return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_STAT_SIZE, stat_buffer);
2596 } else if (NDS32_SYSCALL_GETTIMEOFDAY == nds32->active_syscall_id) {
2597 /* If doing GDB file-I/O, target should convert 'struct timeval'
2598 * from gdb-format to target-format */
2599 uint8_t timeval_buffer[NDS32_STRUCT_TIMEVAL_SIZE];
2600 timeval_buffer[0] = buffer[3];
2601 timeval_buffer[1] = buffer[2];
2602 timeval_buffer[2] = buffer[1];
2603 timeval_buffer[3] = buffer[0];
2604 timeval_buffer[4] = buffer[11];
2605 timeval_buffer[5] = buffer[10];
2606 timeval_buffer[6] = buffer[9];
2607 timeval_buffer[7] = buffer[8];
2608
2609 return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_TIMEVAL_SIZE, timeval_buffer);
2610 }
2611
2612 return nds32_write_buffer(nds32->target, address, size, buffer);
2613 }
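
/* Worked example for the struct stat conversion above: GDB's file-I/O reply
 * packs each field big-endian and at least 4 bytes wide, while the target
 * layout assumed here is little-endian with narrower fields.  So the 4-byte
 * big-endian st_uid at buffer[16..19] is truncated to the 2-byte target field
 * by keeping only its low-order bytes and reversing them:
 *     stat_buffer[10] = buffer[19];   // LSB
 *     stat_buffer[11] = buffer[18];
 */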
2614
2615 int nds32_reset_halt(struct nds32 *nds32)
2616 {
2617 LOG_INFO("reset halt as init");
2618
2619 struct aice_port_s *aice = target_to_aice(nds32->target);
2620 aice_assert_srst(aice, AICE_RESET_HOLD);
2621
2622 return ERROR_OK;
2623 }
