openocd: fix simple cases of Yoda condition
[openocd.git] / src / target / nds32.c
1 /***************************************************************************
2 * Copyright (C) 2013 Andes Technology *
3 * Hsiangkai Wang <hkwang@andestech.com> *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
17 ***************************************************************************/
18
19 #ifdef HAVE_CONFIG_H
20 #include "config.h"
21 #endif
22
23 #include <helper/log.h>
24 #include <helper/binarybuffer.h>
25 #include "nds32.h"
26 #include "nds32_aice.h"
27 #include "nds32_tlb.h"
28 #include "nds32_disassembler.h"
29
/* Queue of pending EDM (Embedded Debug Module) register operations and its
 * current length; filled elsewhere and bounded by NDS32_EDM_OPERATION_MAX_NUM. */
struct nds32_edm_operation nds32_edm_ops[NDS32_EDM_OPERATION_MAX_NUM];
uint32_t nds32_edm_ops_num;
32
/* Human-readable names for the 11 debug-stop reasons reported by the core;
 * indexed by the debug-reason code. */
const char *nds32_debug_type_name[11] = {
	"SOFTWARE BREAK",
	"SOFTWARE BREAK_16",
	"HARDWARE BREAKPOINT",
	"DATA ADDR WATCHPOINT PRECISE",
	"DATA VALUE WATCHPOINT PRECISE",
	"DATA VALUE WATCHPOINT IMPRECISE",
	"DEBUG INTERRUPT",
	"HARDWARE SINGLE STEP",
	"DATA ADDR WATCHPOINT NEXT PRECISE",
	"DATA VALUE WATCHPOINT NEXT PRECISE",
	"LOAD STORE GLOBAL STOP",
};
46
/* Local-memory size in bytes, indexed by the 4-bit size field of the local
 * memory configuration register. Only 11 encodings are defined (note the
 * 1 KB/2 KB entries at indices 9 and 10); the remaining 5 slots of the
 * 16-entry table are implicitly zero-initialized. */
static const int nds32_lm_size_table[16] = {
	4 * 1024,
	8 * 1024,
	16 * 1024,
	32 * 1024,
	64 * 1024,
	128 * 1024,
	256 * 1024,
	512 * 1024,
	1024 * 1024,
	1 * 1024,
	2 * 1024,
};
60
/* Cache line size in bytes, indexed by the 3-bit line-size field of the
 * cache configuration registers ($cr1/$cr2); 0 means "no cache line". */
static const int nds32_line_size_table[6] = {
	0,
	8,
	16,
	32,
	64,
	128,
};
69
70 static int nds32_get_core_reg(struct reg *reg)
71 {
72 int retval;
73 struct nds32_reg *reg_arch_info = reg->arch_info;
74 struct target *target = reg_arch_info->target;
75 struct nds32 *nds32 = target_to_nds32(target);
76 struct aice_port_s *aice = target_to_aice(target);
77
78 if (target->state != TARGET_HALTED) {
79 LOG_ERROR("Target not halted");
80 return ERROR_TARGET_NOT_HALTED;
81 }
82
83 if (reg->valid) {
84 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
85 LOG_DEBUG("reading register(cached) %" PRIi32 "(%s), value: 0x%8.8" PRIx32,
86 reg_arch_info->num, reg->name, val);
87 return ERROR_OK;
88 }
89
90 int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);
91
92 if (reg_arch_info->enable == false) {
93 buf_set_u32(reg_arch_info->value, 0, 32, NDS32_REGISTER_DISABLE);
94 retval = ERROR_FAIL;
95 } else {
96 uint32_t val = 0;
97 if ((nds32->fpu_enable == false)
98 && (NDS32_REG_TYPE_FPU == nds32_reg_type(mapped_regnum))) {
99 retval = ERROR_OK;
100 } else if ((nds32->audio_enable == false)
101 && (NDS32_REG_TYPE_AUMR == nds32_reg_type(mapped_regnum))) {
102 retval = ERROR_OK;
103 } else {
104 retval = aice_read_register(aice, mapped_regnum, &val);
105 }
106 buf_set_u32(reg_arch_info->value, 0, 32, val);
107
108 LOG_DEBUG("reading register %" PRIi32 "(%s), value: 0x%8.8" PRIx32,
109 reg_arch_info->num, reg->name, val);
110 }
111
112 if (retval == ERROR_OK) {
113 reg->valid = true;
114 reg->dirty = false;
115 }
116
117 return retval;
118 }
119
120 static int nds32_get_core_reg_64(struct reg *reg)
121 {
122 int retval;
123 struct nds32_reg *reg_arch_info = reg->arch_info;
124 struct target *target = reg_arch_info->target;
125 struct nds32 *nds32 = target_to_nds32(target);
126 struct aice_port_s *aice = target_to_aice(target);
127
128 if (target->state != TARGET_HALTED) {
129 LOG_ERROR("Target not halted");
130 return ERROR_TARGET_NOT_HALTED;
131 }
132
133 if (reg->valid)
134 return ERROR_OK;
135
136 if (reg_arch_info->enable == false) {
137 buf_set_u64(reg_arch_info->value, 0, 64, NDS32_REGISTER_DISABLE);
138 retval = ERROR_FAIL;
139 } else {
140 uint64_t val = 0;
141 if ((nds32->fpu_enable == false)
142 && ((FD0 <= reg_arch_info->num) && (reg_arch_info->num <= FD31))) {
143 retval = ERROR_OK;
144 } else {
145 retval = aice_read_reg_64(aice, reg_arch_info->num, &val);
146 }
147 buf_set_u64(reg_arch_info->value, 0, 64, val);
148 }
149
150 if (retval == ERROR_OK) {
151 reg->valid = true;
152 reg->dirty = false;
153 }
154
155 return retval;
156 }
157
158 static int nds32_update_psw(struct nds32 *nds32)
159 {
160 uint32_t value_ir0;
161 struct aice_port_s *aice = target_to_aice(nds32->target);
162
163 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
164
165 /* Save data memory endian */
166 if ((value_ir0 >> 5) & 0x1) {
167 nds32->data_endian = TARGET_BIG_ENDIAN;
168 aice_set_data_endian(aice, AICE_BIG_ENDIAN);
169 } else {
170 nds32->data_endian = TARGET_LITTLE_ENDIAN;
171 aice_set_data_endian(aice, AICE_LITTLE_ENDIAN);
172 }
173
174 /* Save translation status */
175 nds32->memory.address_translation = ((value_ir0 >> 7) & 0x1) ? true : false;
176
177 return ERROR_OK;
178 }
179
180 static int nds32_update_mmu_info(struct nds32 *nds32)
181 {
182 uint32_t value;
183
184 /* Update MMU control status */
185 nds32_get_mapped_reg(nds32, MR0, &value);
186 nds32->mmu_config.default_min_page_size = value & 0x1;
187 nds32->mmu_config.multiple_page_size_in_use = (value >> 10) & 0x1;
188
189 return ERROR_OK;
190 }
191
192 static int nds32_update_cache_info(struct nds32 *nds32)
193 {
194 uint32_t value;
195
196 if (ERROR_OK == nds32_get_mapped_reg(nds32, MR8, &value)) {
197 if (value & 0x1)
198 nds32->memory.icache.enable = true;
199 else
200 nds32->memory.icache.enable = false;
201
202 if (value & 0x2)
203 nds32->memory.dcache.enable = true;
204 else
205 nds32->memory.dcache.enable = false;
206 } else {
207 nds32->memory.icache.enable = false;
208 nds32->memory.dcache.enable = false;
209 }
210
211 return ERROR_OK;
212 }
213
214 static int nds32_update_lm_info(struct nds32 *nds32)
215 {
216 struct nds32_memory *memory = &(nds32->memory);
217 uint32_t value_mr6;
218 uint32_t value_mr7;
219
220 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
221 if (value_mr6 & 0x1)
222 memory->ilm_enable = true;
223 else
224 memory->ilm_enable = false;
225
226 if (memory->ilm_align_ver == 0) { /* 1MB aligned */
227 memory->ilm_start = value_mr6 & 0xFFF00000;
228 memory->ilm_end = memory->ilm_start + memory->ilm_size;
229 } else if (memory->ilm_align_ver == 1) { /* aligned to local memory size */
230 memory->ilm_start = value_mr6 & 0xFFFFFC00;
231 memory->ilm_end = memory->ilm_start + memory->ilm_size;
232 } else {
233 memory->ilm_start = -1;
234 memory->ilm_end = -1;
235 }
236
237 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
238 if (value_mr7 & 0x1)
239 memory->dlm_enable = true;
240 else
241 memory->dlm_enable = false;
242
243 if (memory->dlm_align_ver == 0) { /* 1MB aligned */
244 memory->dlm_start = value_mr7 & 0xFFF00000;
245 memory->dlm_end = memory->dlm_start + memory->dlm_size;
246 } else if (memory->dlm_align_ver == 1) { /* aligned to local memory size */
247 memory->dlm_start = value_mr7 & 0xFFFFFC00;
248 memory->dlm_end = memory->dlm_start + memory->dlm_size;
249 } else {
250 memory->dlm_start = -1;
251 memory->dlm_end = -1;
252 }
253
254 return ERROR_OK;
255 }
256
257 /**
258 * If fpu/audio is disabled, to access fpu/audio registers will cause
259 * exceptions. So, we need to check if fpu/audio is enabled or not as
260 * target is halted. If fpu/audio is disabled, as users access fpu/audio
261 * registers, OpenOCD will return fake value 0 instead of accessing
262 * registers through DIM.
263 */
264 static int nds32_check_extension(struct nds32 *nds32)
265 {
266 uint32_t value;
267
268 nds32_get_mapped_reg(nds32, FUCPR, &value);
269 if (value == NDS32_REGISTER_DISABLE) {
270 nds32->fpu_enable = false;
271 nds32->audio_enable = false;
272 return ERROR_OK;
273 }
274
275 if (value & 0x1)
276 nds32->fpu_enable = true;
277 else
278 nds32->fpu_enable = false;
279
280 if (value & 0x80000000)
281 nds32->audio_enable = true;
282 else
283 nds32->audio_enable = false;
284
285 return ERROR_OK;
286 }
287
288 static int nds32_set_core_reg(struct reg *reg, uint8_t *buf)
289 {
290 struct nds32_reg *reg_arch_info = reg->arch_info;
291 struct target *target = reg_arch_info->target;
292 struct nds32 *nds32 = target_to_nds32(target);
293 struct aice_port_s *aice = target_to_aice(target);
294 uint32_t value = buf_get_u32(buf, 0, 32);
295
296 if (target->state != TARGET_HALTED) {
297 LOG_ERROR("Target not halted");
298 return ERROR_TARGET_NOT_HALTED;
299 }
300
301 int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);
302
303 /* ignore values that will generate exception */
304 if (nds32_reg_exception(mapped_regnum, value))
305 return ERROR_OK;
306
307 LOG_DEBUG("writing register %" PRIi32 "(%s) with value 0x%8.8" PRIx32,
308 reg_arch_info->num, reg->name, value);
309
310 if ((nds32->fpu_enable == false) &&
311 (NDS32_REG_TYPE_FPU == nds32_reg_type(mapped_regnum))) {
312
313 buf_set_u32(reg->value, 0, 32, 0);
314 } else if ((nds32->audio_enable == false) &&
315 (NDS32_REG_TYPE_AUMR == nds32_reg_type(mapped_regnum))) {
316
317 buf_set_u32(reg->value, 0, 32, 0);
318 } else {
319 buf_set_u32(reg->value, 0, 32, value);
320 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
321 aice_write_register(aice, mapped_regnum, val);
322
323 /* After set value to registers, read the value from target
324 * to avoid W1C inconsistency. */
325 aice_read_register(aice, mapped_regnum, &val);
326 buf_set_u32(reg_arch_info->value, 0, 32, val);
327 }
328
329 reg->valid = true;
330 reg->dirty = false;
331
332 /* update registers to take effect right now */
333 if (mapped_regnum == IR0) {
334 nds32_update_psw(nds32);
335 } else if (mapped_regnum == MR0) {
336 nds32_update_mmu_info(nds32);
337 } else if ((mapped_regnum == MR6) || (mapped_regnum == MR7)) {
338 /* update lm information */
339 nds32_update_lm_info(nds32);
340 } else if (mapped_regnum == MR8) {
341 nds32_update_cache_info(nds32);
342 } else if (mapped_regnum == FUCPR) {
343 /* update audio/fpu setting */
344 nds32_check_extension(nds32);
345 }
346
347 return ERROR_OK;
348 }
349
350 static int nds32_set_core_reg_64(struct reg *reg, uint8_t *buf)
351 {
352 struct nds32_reg *reg_arch_info = reg->arch_info;
353 struct target *target = reg_arch_info->target;
354 struct nds32 *nds32 = target_to_nds32(target);
355 uint32_t low_part = buf_get_u32(buf, 0, 32);
356 uint32_t high_part = buf_get_u32(buf, 32, 32);
357
358 if (target->state != TARGET_HALTED) {
359 LOG_ERROR("Target not halted");
360 return ERROR_TARGET_NOT_HALTED;
361 }
362
363 if ((nds32->fpu_enable == false) &&
364 ((FD0 <= reg_arch_info->num) && (reg_arch_info->num <= FD31))) {
365
366 buf_set_u32(reg->value, 0, 32, 0);
367 buf_set_u32(reg->value, 32, 32, 0);
368
369 reg->valid = true;
370 reg->dirty = false;
371 } else {
372 buf_set_u32(reg->value, 0, 32, low_part);
373 buf_set_u32(reg->value, 32, 32, high_part);
374
375 reg->valid = true;
376 reg->dirty = true;
377 }
378
379 return ERROR_OK;
380 }
381
/* Accessors for 32-bit registers. */
static const struct reg_arch_type nds32_reg_access_type = {
	.get = nds32_get_core_reg,
	.set = nds32_set_core_reg,
};

/* Accessors for 64-bit registers (FD0..FD31). */
static const struct reg_arch_type nds32_reg_access_type_64 = {
	.get = nds32_get_core_reg_64,
	.set = nds32_set_core_reg_64,
};
391
392 static struct reg_cache *nds32_build_reg_cache(struct target *target,
393 struct nds32 *nds32)
394 {
395 struct reg_cache *cache = calloc(sizeof(struct reg_cache), 1);
396 struct reg *reg_list = calloc(TOTAL_REG_NUM, sizeof(struct reg));
397 struct nds32_reg *reg_arch_info = calloc(TOTAL_REG_NUM, sizeof(struct nds32_reg));
398 int i;
399
400 if (!cache || !reg_list || !reg_arch_info) {
401 free(cache);
402 free(reg_list);
403 free(reg_arch_info);
404 return NULL;
405 }
406
407 cache->name = "Andes registers";
408 cache->next = NULL;
409 cache->reg_list = reg_list;
410 cache->num_regs = 0;
411
412 for (i = 0; i < TOTAL_REG_NUM; i++) {
413 reg_arch_info[i].num = i;
414 reg_arch_info[i].target = target;
415 reg_arch_info[i].nds32 = nds32;
416 reg_arch_info[i].enable = false;
417
418 reg_list[i].name = nds32_reg_simple_name(i);
419 reg_list[i].number = reg_arch_info[i].num;
420 reg_list[i].size = nds32_reg_size(i);
421 reg_list[i].arch_info = &reg_arch_info[i];
422
423 reg_list[i].reg_data_type = calloc(sizeof(struct reg_data_type), 1);
424
425 if (FD0 <= reg_arch_info[i].num && reg_arch_info[i].num <= FD31) {
426 reg_list[i].value = reg_arch_info[i].value;
427 reg_list[i].type = &nds32_reg_access_type_64;
428
429 reg_list[i].reg_data_type->type = REG_TYPE_IEEE_DOUBLE;
430 reg_list[i].reg_data_type->id = "ieee_double";
431 reg_list[i].group = "float";
432 } else {
433 reg_list[i].value = reg_arch_info[i].value;
434 reg_list[i].type = &nds32_reg_access_type;
435 reg_list[i].group = "general";
436
437 if ((FS0 <= reg_arch_info[i].num) && (reg_arch_info[i].num <= FS31)) {
438 reg_list[i].reg_data_type->type = REG_TYPE_IEEE_SINGLE;
439 reg_list[i].reg_data_type->id = "ieee_single";
440 reg_list[i].group = "float";
441 } else if ((reg_arch_info[i].num == FPCSR) ||
442 (reg_arch_info[i].num == FPCFG)) {
443 reg_list[i].group = "float";
444 } else if ((reg_arch_info[i].num == R28) ||
445 (reg_arch_info[i].num == R29) ||
446 (reg_arch_info[i].num == R31)) {
447 reg_list[i].reg_data_type->type = REG_TYPE_DATA_PTR;
448 reg_list[i].reg_data_type->id = "data_ptr";
449 } else if ((reg_arch_info[i].num == R30) ||
450 (reg_arch_info[i].num == PC)) {
451 reg_list[i].reg_data_type->type = REG_TYPE_CODE_PTR;
452 reg_list[i].reg_data_type->id = "code_ptr";
453 } else {
454 reg_list[i].reg_data_type->type = REG_TYPE_UINT32;
455 reg_list[i].reg_data_type->id = "uint32";
456 }
457 }
458
459 if (R16 <= reg_arch_info[i].num && reg_arch_info[i].num <= R25)
460 reg_list[i].caller_save = true;
461 else
462 reg_list[i].caller_save = false;
463
464 reg_list[i].feature = malloc(sizeof(struct reg_feature));
465
466 if (R0 <= reg_arch_info[i].num && reg_arch_info[i].num <= IFC_LP)
467 reg_list[i].feature->name = "org.gnu.gdb.nds32.core";
468 else if (CR0 <= reg_arch_info[i].num && reg_arch_info[i].num <= SECUR0)
469 reg_list[i].feature->name = "org.gnu.gdb.nds32.system";
470 else if (D0L24 <= reg_arch_info[i].num && reg_arch_info[i].num <= CBE3)
471 reg_list[i].feature->name = "org.gnu.gdb.nds32.audio";
472 else if (FPCSR <= reg_arch_info[i].num && reg_arch_info[i].num <= FD31)
473 reg_list[i].feature->name = "org.gnu.gdb.nds32.fpu";
474
475 cache->num_regs++;
476 }
477
478 nds32->core_cache = cache;
479
480 return cache;
481 }
482
483 static int nds32_reg_cache_init(struct target *target, struct nds32 *nds32)
484 {
485 struct reg_cache *cache;
486
487 cache = nds32_build_reg_cache(target, nds32);
488 if (!cache)
489 return ERROR_FAIL;
490
491 *register_get_last_cache_p(&target->reg_cache) = cache;
492
493 return ERROR_OK;
494 }
495
496 static struct reg *nds32_reg_current(struct nds32 *nds32, unsigned regnum)
497 {
498 struct reg *r;
499
500 r = nds32->core_cache->reg_list + regnum;
501
502 return r;
503 }
504
/**
 * Re-read the full target context after a halt: pull $pc and $psw into the
 * register cache, then refresh all state derived from system registers
 * (endianness/translation, MMU, caches, local memories, FPU/audio presence).
 *
 * The two reads are done for their caching side effect; the returned values
 * themselves are intentionally unused here.
 */
int nds32_full_context(struct nds32 *nds32)
{
	uint32_t value, value_ir0;

	/* save $pc & $psw */
	nds32_get_mapped_reg(nds32, PC, &value);
	nds32_get_mapped_reg(nds32, IR0, &value_ir0);

	nds32_update_psw(nds32);
	nds32_update_mmu_info(nds32);
	nds32_update_cache_info(nds32);
	nds32_update_lm_info(nds32);

	nds32_check_extension(nds32);

	return ERROR_OK;
}
522
523 /* get register value internally */
524 int nds32_get_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t *value)
525 {
526 struct reg_cache *reg_cache = nds32->core_cache;
527 struct reg *r;
528
529 if (regnum > reg_cache->num_regs)
530 return ERROR_FAIL;
531
532 r = nds32_reg_current(nds32, regnum);
533
534 if (ERROR_OK != r->type->get(r))
535 return ERROR_FAIL;
536
537 *value = buf_get_u32(r->value, 0, 32);
538
539 return ERROR_OK;
540 }
541
542 /** set register internally */
543 int nds32_set_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t value)
544 {
545 struct reg_cache *reg_cache = nds32->core_cache;
546 struct reg *r;
547 uint8_t set_value[4];
548
549 if (regnum > reg_cache->num_regs)
550 return ERROR_FAIL;
551
552 r = nds32_reg_current(nds32, regnum);
553
554 buf_set_u32(set_value, 0, 32, value);
555
556 return r->type->set(r, set_value);
557 }
558
559 /** get general register list */
560 static int nds32_get_general_reg_list(struct nds32 *nds32,
561 struct reg **reg_list[], int *reg_list_size)
562 {
563 struct reg *reg_current;
564 int i;
565 int current_idx;
566
567 /** freed in gdb_server.c */
568 *reg_list = malloc(sizeof(struct reg *) * (IFC_LP - R0 + 1));
569 current_idx = 0;
570
571 for (i = R0; i < IFC_LP + 1; i++) {
572 reg_current = nds32_reg_current(nds32, i);
573 if (((struct nds32_reg *)reg_current->arch_info)->enable) {
574 (*reg_list)[current_idx] = reg_current;
575 current_idx++;
576 }
577 }
578 *reg_list_size = current_idx;
579
580 return ERROR_OK;
581 }
582
583 /** get all register list */
584 static int nds32_get_all_reg_list(struct nds32 *nds32,
585 struct reg **reg_list[], int *reg_list_size)
586 {
587 struct reg_cache *reg_cache = nds32->core_cache;
588 struct reg *reg_current;
589 unsigned int i;
590
591 *reg_list_size = reg_cache->num_regs;
592
593 /** freed in gdb_server.c */
594 *reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));
595
596 for (i = 0; i < reg_cache->num_regs; i++) {
597 reg_current = nds32_reg_current(nds32, i);
598 reg_current->exist = ((struct nds32_reg *)
599 reg_current->arch_info)->enable;
600 (*reg_list)[i] = reg_current;
601 }
602
603 return ERROR_OK;
604 }
605
606 /** get all register list */
607 int nds32_get_gdb_reg_list(struct target *target,
608 struct reg **reg_list[], int *reg_list_size,
609 enum target_register_class reg_class)
610 {
611 struct nds32 *nds32 = target_to_nds32(target);
612
613 switch (reg_class) {
614 case REG_CLASS_ALL:
615 return nds32_get_all_reg_list(nds32, reg_list, reg_list_size);
616 case REG_CLASS_GENERAL:
617 return nds32_get_general_reg_list(nds32, reg_list, reg_list_size);
618 default:
619 return ERROR_FAIL;
620 }
621
622 return ERROR_FAIL;
623 }
624
625 static int nds32_select_memory_mode(struct target *target, uint32_t address,
626 uint32_t length, uint32_t *end_address)
627 {
628 struct nds32 *nds32 = target_to_nds32(target);
629 struct aice_port_s *aice = target_to_aice(target);
630 struct nds32_memory *memory = &(nds32->memory);
631 struct nds32_edm *edm = &(nds32->edm);
632 uint32_t dlm_start, dlm_end;
633 uint32_t ilm_start, ilm_end;
634 uint32_t address_end = address + length;
635
636 /* init end_address */
637 *end_address = address_end;
638
639 if (NDS_MEMORY_ACC_CPU == memory->access_channel)
640 return ERROR_OK;
641
642 if (edm->access_control == false) {
643 LOG_DEBUG("EDM does not support ACC_CTL");
644 return ERROR_OK;
645 }
646
647 if (edm->direct_access_local_memory == false) {
648 LOG_DEBUG("EDM does not support DALM");
649 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
650 return ERROR_OK;
651 }
652
653 if (NDS_MEMORY_SELECT_AUTO != memory->mode) {
654 LOG_DEBUG("Memory mode is not AUTO");
655 return ERROR_OK;
656 }
657
658 /* set default mode */
659 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
660
661 if ((memory->ilm_base != 0) && (memory->ilm_enable == true)) {
662 ilm_start = memory->ilm_start;
663 ilm_end = memory->ilm_end;
664
665 /* case 1, address < ilm_start */
666 if (address < ilm_start) {
667 if (ilm_start < address_end) {
668 /* update end_address to split non-ILM from ILM */
669 *end_address = ilm_start;
670 }
671 /* MEM mode */
672 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
673 } else if ((ilm_start <= address) && (address < ilm_end)) {
674 /* case 2, ilm_start <= address < ilm_end */
675 if (ilm_end < address_end) {
676 /* update end_address to split non-ILM from ILM */
677 *end_address = ilm_end;
678 }
679 /* ILM mode */
680 aice_memory_mode(aice, NDS_MEMORY_SELECT_ILM);
681 } else { /* case 3, ilm_end <= address */
682 /* MEM mode */
683 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
684 }
685
686 return ERROR_OK;
687 } else {
688 LOG_DEBUG("ILM is not enabled");
689 }
690
691 if ((memory->dlm_base != 0) && (memory->dlm_enable == true)) {
692 dlm_start = memory->dlm_start;
693 dlm_end = memory->dlm_end;
694
695 /* case 1, address < dlm_start */
696 if (address < dlm_start) {
697 if (dlm_start < address_end) {
698 /* update end_address to split non-DLM from DLM */
699 *end_address = dlm_start;
700 }
701 /* MEM mode */
702 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
703 } else if ((dlm_start <= address) && (address < dlm_end)) {
704 /* case 2, dlm_start <= address < dlm_end */
705 if (dlm_end < address_end) {
706 /* update end_address to split non-DLM from DLM */
707 *end_address = dlm_end;
708 }
709 /* DLM mode */
710 aice_memory_mode(aice, NDS_MEMORY_SELECT_DLM);
711 } else { /* case 3, dlm_end <= address */
712 /* MEM mode */
713 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
714 }
715
716 return ERROR_OK;
717 } else {
718 LOG_DEBUG("DLM is not enabled");
719 }
720
721 return ERROR_OK;
722 }
723
724 int nds32_read_buffer(struct target *target, uint32_t address,
725 uint32_t size, uint8_t *buffer)
726 {
727 struct nds32 *nds32 = target_to_nds32(target);
728 struct nds32_memory *memory = &(nds32->memory);
729
730 if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
731 (target->state != TARGET_HALTED)) {
732 LOG_WARNING("target was not halted");
733 return ERROR_TARGET_NOT_HALTED;
734 }
735
736 LOG_DEBUG("READ BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
737 address,
738 size);
739
740 int retval = ERROR_OK;
741 struct aice_port_s *aice = target_to_aice(target);
742 uint32_t end_address;
743
744 if (((address % 2) == 0) && (size == 2)) {
745 nds32_select_memory_mode(target, address, 2, &end_address);
746 return aice_read_mem_unit(aice, address, 2, 1, buffer);
747 }
748
749 /* handle unaligned head bytes */
750 if (address % 4) {
751 uint32_t unaligned = 4 - (address % 4);
752
753 if (unaligned > size)
754 unaligned = size;
755
756 nds32_select_memory_mode(target, address, unaligned, &end_address);
757 retval = aice_read_mem_unit(aice, address, 1, unaligned, buffer);
758 if (retval != ERROR_OK)
759 return retval;
760
761 buffer += unaligned;
762 address += unaligned;
763 size -= unaligned;
764 }
765
766 /* handle aligned words */
767 if (size >= 4) {
768 int aligned = size - (size % 4);
769 int read_len;
770
771 do {
772 nds32_select_memory_mode(target, address, aligned, &end_address);
773
774 read_len = end_address - address;
775
776 if (read_len > 8)
777 retval = aice_read_mem_bulk(aice, address, read_len, buffer);
778 else
779 retval = aice_read_mem_unit(aice, address, 4, read_len / 4, buffer);
780
781 if (retval != ERROR_OK)
782 return retval;
783
784 buffer += read_len;
785 address += read_len;
786 size -= read_len;
787 aligned -= read_len;
788
789 } while (aligned != 0);
790 }
791
792 /*prevent byte access when possible (avoid AHB access limitations in some cases)*/
793 if (size >= 2) {
794 int aligned = size - (size % 2);
795 nds32_select_memory_mode(target, address, aligned, &end_address);
796 retval = aice_read_mem_unit(aice, address, 2, aligned / 2, buffer);
797 if (retval != ERROR_OK)
798 return retval;
799
800 buffer += aligned;
801 address += aligned;
802 size -= aligned;
803 }
804 /* handle tail writes of less than 4 bytes */
805 if (size > 0) {
806 nds32_select_memory_mode(target, address, size, &end_address);
807 retval = aice_read_mem_unit(aice, address, 1, size, buffer);
808 if (retval != ERROR_OK)
809 return retval;
810 }
811
812 return ERROR_OK;
813 }
814
815 int nds32_read_memory(struct target *target, uint32_t address,
816 uint32_t size, uint32_t count, uint8_t *buffer)
817 {
818 struct aice_port_s *aice = target_to_aice(target);
819
820 return aice_read_mem_unit(aice, address, size, count, buffer);
821 }
822
823 int nds32_read_phys_memory(struct target *target, target_addr_t address,
824 uint32_t size, uint32_t count, uint8_t *buffer)
825 {
826 struct aice_port_s *aice = target_to_aice(target);
827 struct nds32 *nds32 = target_to_nds32(target);
828 struct nds32_memory *memory = &(nds32->memory);
829 enum nds_memory_access orig_channel;
830 int result;
831
832 /* switch to BUS access mode to skip MMU */
833 orig_channel = memory->access_channel;
834 memory->access_channel = NDS_MEMORY_ACC_BUS;
835 aice_memory_access(aice, memory->access_channel);
836
837 /* The input address is physical address. No need to do address translation. */
838 result = aice_read_mem_unit(aice, address, size, count, buffer);
839
840 /* restore to origin access mode */
841 memory->access_channel = orig_channel;
842 aice_memory_access(aice, memory->access_channel);
843
844 return result;
845 }
846
847 int nds32_write_buffer(struct target *target, uint32_t address,
848 uint32_t size, const uint8_t *buffer)
849 {
850 struct nds32 *nds32 = target_to_nds32(target);
851 struct nds32_memory *memory = &(nds32->memory);
852
853 if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
854 (target->state != TARGET_HALTED)) {
855 LOG_WARNING("target was not halted");
856 return ERROR_TARGET_NOT_HALTED;
857 }
858
859 LOG_DEBUG("WRITE BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
860 address,
861 size);
862
863 struct aice_port_s *aice = target_to_aice(target);
864 int retval = ERROR_OK;
865 uint32_t end_address;
866
867 if (((address % 2) == 0) && (size == 2)) {
868 nds32_select_memory_mode(target, address, 2, &end_address);
869 return aice_write_mem_unit(aice, address, 2, 1, buffer);
870 }
871
872 /* handle unaligned head bytes */
873 if (address % 4) {
874 uint32_t unaligned = 4 - (address % 4);
875
876 if (unaligned > size)
877 unaligned = size;
878
879 nds32_select_memory_mode(target, address, unaligned, &end_address);
880 retval = aice_write_mem_unit(aice, address, 1, unaligned, buffer);
881 if (retval != ERROR_OK)
882 return retval;
883
884 buffer += unaligned;
885 address += unaligned;
886 size -= unaligned;
887 }
888
889 /* handle aligned words */
890 if (size >= 4) {
891 int aligned = size - (size % 4);
892 int write_len;
893
894 do {
895 nds32_select_memory_mode(target, address, aligned, &end_address);
896
897 write_len = end_address - address;
898 if (write_len > 8)
899 retval = aice_write_mem_bulk(aice, address, write_len, buffer);
900 else
901 retval = aice_write_mem_unit(aice, address, 4, write_len / 4, buffer);
902 if (retval != ERROR_OK)
903 return retval;
904
905 buffer += write_len;
906 address += write_len;
907 size -= write_len;
908 aligned -= write_len;
909
910 } while (aligned != 0);
911 }
912
913 /* handle tail writes of less than 4 bytes */
914 if (size > 0) {
915 nds32_select_memory_mode(target, address, size, &end_address);
916 retval = aice_write_mem_unit(aice, address, 1, size, buffer);
917 if (retval != ERROR_OK)
918 return retval;
919 }
920
921 return retval;
922 }
923
924 int nds32_write_memory(struct target *target, uint32_t address,
925 uint32_t size, uint32_t count, const uint8_t *buffer)
926 {
927 struct aice_port_s *aice = target_to_aice(target);
928
929 return aice_write_mem_unit(aice, address, size, count, buffer);
930 }
931
932 int nds32_write_phys_memory(struct target *target, target_addr_t address,
933 uint32_t size, uint32_t count, const uint8_t *buffer)
934 {
935 struct aice_port_s *aice = target_to_aice(target);
936 struct nds32 *nds32 = target_to_nds32(target);
937 struct nds32_memory *memory = &(nds32->memory);
938 enum nds_memory_access orig_channel;
939 int result;
940
941 /* switch to BUS access mode to skip MMU */
942 orig_channel = memory->access_channel;
943 memory->access_channel = NDS_MEMORY_ACC_BUS;
944 aice_memory_access(aice, memory->access_channel);
945
946 /* The input address is physical address. No need to do address translation. */
947 result = aice_write_mem_unit(aice, address, size, count, buffer);
948
949 /* restore to origin access mode */
950 memory->access_channel = orig_channel;
951 aice_memory_access(aice, memory->access_channel);
952
953 return result;
954 }
955
956 int nds32_mmu(struct target *target, int *enabled)
957 {
958 if (target->state != TARGET_HALTED) {
959 LOG_ERROR("%s: target not halted", __func__);
960 return ERROR_TARGET_INVALID;
961 }
962
963 struct nds32 *nds32 = target_to_nds32(target);
964 struct nds32_memory *memory = &(nds32->memory);
965 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
966
967 if ((mmu_config->memory_protection == 2) && (memory->address_translation == true))
968 *enabled = 1;
969 else
970 *enabled = 0;
971
972 return ERROR_OK;
973 }
974
975 int nds32_arch_state(struct target *target)
976 {
977 struct nds32 *nds32 = target_to_nds32(target);
978
979 if (nds32->common_magic != NDS32_COMMON_MAGIC) {
980 LOG_ERROR("BUG: called for a non-Andes target");
981 return ERROR_FAIL;
982 }
983
984 uint32_t value_pc, value_psw;
985
986 nds32_get_mapped_reg(nds32, PC, &value_pc);
987 nds32_get_mapped_reg(nds32, IR0, &value_psw);
988
989 LOG_USER("target halted due to %s\n"
990 "psw: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
991 debug_reason_name(target),
992 value_psw,
993 value_pc,
994 nds32->virtual_hosting ? ", virtual hosting" : "");
995
996 /* save pc value to pseudo register pc */
997 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
998 buf_set_u32(reg->value, 0, 32, value_pc);
999
1000 return ERROR_OK;
1001 }
1002
1003 static void nds32_init_must_have_registers(struct nds32 *nds32)
1004 {
1005 struct reg_cache *reg_cache = nds32->core_cache;
1006
1007 /** MUST have general registers */
1008 ((struct nds32_reg *)reg_cache->reg_list[R0].arch_info)->enable = true;
1009 ((struct nds32_reg *)reg_cache->reg_list[R1].arch_info)->enable = true;
1010 ((struct nds32_reg *)reg_cache->reg_list[R2].arch_info)->enable = true;
1011 ((struct nds32_reg *)reg_cache->reg_list[R3].arch_info)->enable = true;
1012 ((struct nds32_reg *)reg_cache->reg_list[R4].arch_info)->enable = true;
1013 ((struct nds32_reg *)reg_cache->reg_list[R5].arch_info)->enable = true;
1014 ((struct nds32_reg *)reg_cache->reg_list[R6].arch_info)->enable = true;
1015 ((struct nds32_reg *)reg_cache->reg_list[R7].arch_info)->enable = true;
1016 ((struct nds32_reg *)reg_cache->reg_list[R8].arch_info)->enable = true;
1017 ((struct nds32_reg *)reg_cache->reg_list[R9].arch_info)->enable = true;
1018 ((struct nds32_reg *)reg_cache->reg_list[R10].arch_info)->enable = true;
1019 ((struct nds32_reg *)reg_cache->reg_list[R15].arch_info)->enable = true;
1020 ((struct nds32_reg *)reg_cache->reg_list[R28].arch_info)->enable = true;
1021 ((struct nds32_reg *)reg_cache->reg_list[R29].arch_info)->enable = true;
1022 ((struct nds32_reg *)reg_cache->reg_list[R30].arch_info)->enable = true;
1023 ((struct nds32_reg *)reg_cache->reg_list[R31].arch_info)->enable = true;
1024 ((struct nds32_reg *)reg_cache->reg_list[PC].arch_info)->enable = true;
1025
1026 /** MUST have configuration system registers */
1027 ((struct nds32_reg *)reg_cache->reg_list[CR0].arch_info)->enable = true;
1028 ((struct nds32_reg *)reg_cache->reg_list[CR1].arch_info)->enable = true;
1029 ((struct nds32_reg *)reg_cache->reg_list[CR2].arch_info)->enable = true;
1030 ((struct nds32_reg *)reg_cache->reg_list[CR3].arch_info)->enable = true;
1031 ((struct nds32_reg *)reg_cache->reg_list[CR4].arch_info)->enable = true;
1032
1033 /** MUST have interrupt system registers */
1034 ((struct nds32_reg *)reg_cache->reg_list[IR0].arch_info)->enable = true;
1035 ((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = true;
1036 ((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = true;
1037 ((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = true;
1038 ((struct nds32_reg *)reg_cache->reg_list[IR6].arch_info)->enable = true;
1039 ((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = true;
1040 ((struct nds32_reg *)reg_cache->reg_list[IR11].arch_info)->enable = true;
1041 ((struct nds32_reg *)reg_cache->reg_list[IR14].arch_info)->enable = true;
1042 ((struct nds32_reg *)reg_cache->reg_list[IR15].arch_info)->enable = true;
1043
1044 /** MUST have MMU system registers */
1045 ((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = true;
1046
1047 /** MUST have EDM system registers */
1048 ((struct nds32_reg *)reg_cache->reg_list[DR40].arch_info)->enable = true;
1049 ((struct nds32_reg *)reg_cache->reg_list[DR42].arch_info)->enable = true;
1050 }
1051
1052 static int nds32_init_memory_config(struct nds32 *nds32)
1053 {
1054 uint32_t value_cr1; /* ICM_CFG */
1055 uint32_t value_cr2; /* DCM_CFG */
1056 struct nds32_memory *memory = &(nds32->memory);
1057
1058 /* read $cr1 to init instruction memory information */
1059 nds32_get_mapped_reg(nds32, CR1, &value_cr1);
1060 memory->icache.set = value_cr1 & 0x7;
1061 memory->icache.way = (value_cr1 >> 3) & 0x7;
1062 memory->icache.line_size = (value_cr1 >> 6) & 0x7;
1063 memory->icache.lock_support = (value_cr1 >> 9) & 0x1;
1064
1065 memory->ilm_base = (value_cr1 >> 10) & 0x7;
1066 memory->ilm_align_ver = (value_cr1 >> 13) & 0x3;
1067
1068 /* read $cr2 to init data memory information */
1069 nds32_get_mapped_reg(nds32, CR2, &value_cr2);
1070 memory->dcache.set = value_cr2 & 0x7;
1071 memory->dcache.way = (value_cr2 >> 3) & 0x7;
1072 memory->dcache.line_size = (value_cr2 >> 6) & 0x7;
1073 memory->dcache.lock_support = (value_cr2 >> 9) & 0x1;
1074
1075 memory->dlm_base = (value_cr2 >> 10) & 0x7;
1076 memory->dlm_align_ver = (value_cr2 >> 13) & 0x3;
1077
1078 return ERROR_OK;
1079 }
1080
1081 static void nds32_init_config(struct nds32 *nds32)
1082 {
1083 uint32_t value_cr0;
1084 uint32_t value_cr3;
1085 uint32_t value_cr4;
1086 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
1087 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
1088 struct nds32_misc_config *misc_config = &(nds32->misc_config);
1089
1090 nds32_get_mapped_reg(nds32, CR0, &value_cr0);
1091 nds32_get_mapped_reg(nds32, CR3, &value_cr3);
1092 nds32_get_mapped_reg(nds32, CR4, &value_cr4);
1093
1094 /* config cpu version */
1095 cpu_version->performance_extension = value_cr0 & 0x1;
1096 cpu_version->_16bit_extension = (value_cr0 >> 1) & 0x1;
1097 cpu_version->performance_extension_2 = (value_cr0 >> 2) & 0x1;
1098 cpu_version->cop_fpu_extension = (value_cr0 >> 3) & 0x1;
1099 cpu_version->string_extension = (value_cr0 >> 4) & 0x1;
1100 cpu_version->revision = (value_cr0 >> 16) & 0xFF;
1101 cpu_version->cpu_id_family = (value_cr0 >> 24) & 0xF;
1102 cpu_version->cpu_id_version = (value_cr0 >> 28) & 0xF;
1103
1104 /* config MMU */
1105 mmu_config->memory_protection = value_cr3 & 0x3;
1106 mmu_config->memory_protection_version = (value_cr3 >> 2) & 0x1F;
1107 mmu_config->fully_associative_tlb = (value_cr3 >> 7) & 0x1;
1108 if (mmu_config->fully_associative_tlb) {
1109 mmu_config->tlb_size = (value_cr3 >> 8) & 0x7F;
1110 } else {
1111 mmu_config->tlb_ways = (value_cr3 >> 8) & 0x7;
1112 mmu_config->tlb_sets = (value_cr3 >> 11) & 0x7;
1113 }
1114 mmu_config->_8k_page_support = (value_cr3 >> 15) & 0x1;
1115 mmu_config->extra_page_size_support = (value_cr3 >> 16) & 0xFF;
1116 mmu_config->tlb_lock = (value_cr3 >> 24) & 0x1;
1117 mmu_config->hardware_page_table_walker = (value_cr3 >> 25) & 0x1;
1118 mmu_config->default_endian = (value_cr3 >> 26) & 0x1;
1119 mmu_config->partition_num = (value_cr3 >> 27) & 0x1;
1120 mmu_config->invisible_tlb = (value_cr3 >> 28) & 0x1;
1121 mmu_config->vlpt = (value_cr3 >> 29) & 0x1;
1122 mmu_config->ntme = (value_cr3 >> 30) & 0x1;
1123 mmu_config->drde = (value_cr3 >> 31) & 0x1;
1124
1125 /* config misc */
1126 misc_config->edm = value_cr4 & 0x1;
1127 misc_config->local_memory_dma = (value_cr4 >> 1) & 0x1;
1128 misc_config->performance_monitor = (value_cr4 >> 2) & 0x1;
1129 misc_config->high_speed_memory_port = (value_cr4 >> 3) & 0x1;
1130 misc_config->debug_tracer = (value_cr4 >> 4) & 0x1;
1131 misc_config->div_instruction = (value_cr4 >> 5) & 0x1;
1132 misc_config->mac_instruction = (value_cr4 >> 6) & 0x1;
1133 misc_config->audio_isa = (value_cr4 >> 7) & 0x3;
1134 misc_config->l2_cache = (value_cr4 >> 9) & 0x1;
1135 misc_config->reduce_register = (value_cr4 >> 10) & 0x1;
1136 misc_config->addr_24 = (value_cr4 >> 11) & 0x1;
1137 misc_config->interruption_level = (value_cr4 >> 12) & 0x1;
1138 misc_config->baseline_instruction = (value_cr4 >> 13) & 0x7;
1139 misc_config->no_dx_register = (value_cr4 >> 16) & 0x1;
1140 misc_config->implement_dependant_register = (value_cr4 >> 17) & 0x1;
1141 misc_config->implement_dependant_sr_encoding = (value_cr4 >> 18) & 0x1;
1142 misc_config->ifc = (value_cr4 >> 19) & 0x1;
1143 misc_config->mcu = (value_cr4 >> 20) & 0x1;
1144 misc_config->shadow = (value_cr4 >> 21) & 0x7;
1145 misc_config->ex9 = (value_cr4 >> 24) & 0x1;
1146
1147 nds32_init_memory_config(nds32);
1148 }
1149
/**
 * Enable (or disable) the optional registers in the register cache based on
 * the capabilities decoded earlier from $cr0/$cr3/$cr4 (see nds32_init_config())
 * and the current privilege level.  Only the code-visible enable flags are
 * touched; no hardware access happens except for the FUCPR/MOD and IR3 probes
 * near the end.
 *
 * @param nds32 Andes target whose core_cache is adjusted
 * @return ERROR_OK always
 */
static int nds32_init_option_registers(struct nds32 *nds32)
{
	struct reg_cache *reg_cache = nds32->core_cache;
	struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
	struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
	struct nds32_misc_config *misc_config = &(nds32->misc_config);
	struct nds32_memory *memory_config = &(nds32->memory);

	bool no_cr5;
	bool mr10_exist;
	bool no_racr0;

	/* Early revisions (revision & 0xFC == 0) of CPU families 0xC/0xD lack
	 * CR5 and RACR0 but do implement MR10 — presumably an erratum of those
	 * cores; TODO confirm against Andes core documentation. */
	if (((cpu_version->cpu_id_family == 0xC) || (cpu_version->cpu_id_family == 0xD)) &&
			((cpu_version->revision & 0xFC) == 0)) {
		no_cr5 = true;
		mr10_exist = true;
		no_racr0 = true;
	} else {
		no_cr5 = false;
		mr10_exist = false;
		no_racr0 = false;
	}

	/* full GPR file only when the core is not a reduced-register variant */
	if (misc_config->reduce_register == false) {
		((struct nds32_reg *)reg_cache->reg_list[R11].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R12].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R13].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R14].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R16].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R17].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R18].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R19].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R20].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R21].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R22].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R23].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R24].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R25].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R26].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[R27].arch_info)->enable = true;
	}

	/* D0/D1 accumulator pairs exist unless the core declares no DX registers */
	if (misc_config->no_dx_register == false) {
		((struct nds32_reg *)reg_cache->reg_list[D0LO].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[D0HI].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[D1LO].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[D1HI].arch_info)->enable = true;
	}

	/* instruction table base register for the EX9 extension */
	if (misc_config->ex9)
		((struct nds32_reg *)reg_cache->reg_list[ITB].arch_info)->enable = true;

	if (no_cr5 == false)
		((struct nds32_reg *)reg_cache->reg_list[CR5].arch_info)->enable = true;

	/* FPU configuration/status registers */
	if (cpu_version->cop_fpu_extension) {
		((struct nds32_reg *)reg_cache->reg_list[CR6].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[FPCSR].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[FPCFG].arch_info)->enable = true;
	}

	if (mmu_config->memory_protection == 1) {
		/* Secure MPU has no IPC, IPSW, P_ITYPE */
		((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = false;
		((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = false;
	}

	/* IR3 (IVB) is only visible at the highest privilege level */
	if (nds32->privilege_level != 0)
		((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = false;

	if (misc_config->mcu == true)
		((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = false;

	/* 3-level interruption stack registers exist only when the core is NOT
	 * limited to 2 interrupt levels */
	if (misc_config->interruption_level == false) {
		((struct nds32_reg *)reg_cache->reg_list[IR2].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR5].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR10].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR12].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR13].arch_info)->enable = true;

		/* Secure MPU has no IPC, IPSW, P_ITYPE */
		if (mmu_config->memory_protection != 1)
			((struct nds32_reg *)reg_cache->reg_list[IR7].arch_info)->enable = true;
	}

	if ((cpu_version->cpu_id_family == 0x9) ||
			(cpu_version->cpu_id_family == 0xA) ||
			(cpu_version->cpu_id_family == 0xC) ||
			(cpu_version->cpu_id_family == 0xD))
		((struct nds32_reg *)reg_cache->reg_list[IR8].arch_info)->enable = true;

	/* shadow register bank variant 1 adds IR16/IR17 */
	if (misc_config->shadow == 1) {
		((struct nds32_reg *)reg_cache->reg_list[IR16].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR17].arch_info)->enable = true;
	}

	if (misc_config->ifc)
		((struct nds32_reg *)reg_cache->reg_list[IFC_LP].arch_info)->enable = true;

	/* MR0 only accessible at the highest privilege level */
	if (nds32->privilege_level != 0)
		((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = false;

	if (mmu_config->memory_protection == 1) {
		/* MPU: MR4 exists from protection version 24 on */
		if (mmu_config->memory_protection_version == 24)
			((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;

		if (nds32->privilege_level == 0) {
			/* secure-MPU register set (versions 16 and 24 only) */
			if ((mmu_config->memory_protection_version == 16) ||
				(mmu_config->memory_protection_version == 24)) {
				((struct nds32_reg *)reg_cache->reg_list[MR11].arch_info)->enable = true;
				((struct nds32_reg *)reg_cache->reg_list[SECUR0].arch_info)->enable = true;
				((struct nds32_reg *)reg_cache->reg_list[IR20].arch_info)->enable = true;
				((struct nds32_reg *)reg_cache->reg_list[IR22].arch_info)->enable = true;
				((struct nds32_reg *)reg_cache->reg_list[IR24].arch_info)->enable = true;
				((struct nds32_reg *)reg_cache->reg_list[IR30].arch_info)->enable = true;

				if (misc_config->shadow == 1) {
					((struct nds32_reg *)reg_cache->reg_list[IR21].arch_info)->enable = true;
					((struct nds32_reg *)reg_cache->reg_list[IR23].arch_info)->enable = true;
					((struct nds32_reg *)reg_cache->reg_list[IR25].arch_info)->enable = true;
				}
			}
		}
	} else if (mmu_config->memory_protection == 2) {
		/* full MMU register set */
		((struct nds32_reg *)reg_cache->reg_list[MR1].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;

		/* MR5 is absent on families 0xA/0xC/0xD */
		if ((cpu_version->cpu_id_family != 0xA) && (cpu_version->cpu_id_family != 0xC) &&
				(cpu_version->cpu_id_family != 0xD))
			((struct nds32_reg *)reg_cache->reg_list[MR5].arch_info)->enable = true;
	}

	/* MR2/MR3 exist whenever any memory protection (MPU or MMU) is present */
	if (mmu_config->memory_protection > 0) {
		((struct nds32_reg *)reg_cache->reg_list[MR2].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[MR3].arch_info)->enable = true;
	}

	/* local-memory base registers, highest privilege level only */
	if (memory_config->ilm_base != 0)
		if (nds32->privilege_level == 0)
			((struct nds32_reg *)reg_cache->reg_list[MR6].arch_info)->enable = true;

	if (memory_config->dlm_base != 0)
		if (nds32->privilege_level == 0)
			((struct nds32_reg *)reg_cache->reg_list[MR7].arch_info)->enable = true;

	/* cache control register requires both caches to be present */
	if ((memory_config->icache.line_size != 0) && (memory_config->dcache.line_size != 0))
		((struct nds32_reg *)reg_cache->reg_list[MR8].arch_info)->enable = true;

	if (misc_config->high_speed_memory_port)
		((struct nds32_reg *)reg_cache->reg_list[MR9].arch_info)->enable = true;

	if (mr10_exist)
		((struct nds32_reg *)reg_cache->reg_list[MR10].arch_info)->enable = true;

	if (misc_config->edm) {
		/* 5 debug registers (BPA/BPAM/BPV/BPCID/BPC) per hardware breakpoint */
		int dr_reg_n = nds32->edm.breakpoint_num * 5;

		for (int i = 0 ; i < dr_reg_n ; i++)
			((struct nds32_reg *)reg_cache->reg_list[DR0 + i].arch_info)->enable = true;

		((struct nds32_reg *)reg_cache->reg_list[DR41].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DR43].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DR44].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DR45].arch_info)->enable = true;
	}

	if (misc_config->debug_tracer) {
		((struct nds32_reg *)reg_cache->reg_list[DR46].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DR47].arch_info)->enable = true;
	}

	if (misc_config->performance_monitor) {
		((struct nds32_reg *)reg_cache->reg_list[PFR0].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[PFR1].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[PFR2].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[PFR3].arch_info)->enable = true;
	}

	if (misc_config->local_memory_dma) {
		((struct nds32_reg *)reg_cache->reg_list[DMAR0].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR1].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR2].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR3].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR4].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR5].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR6].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR7].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR8].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR9].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[DMAR10].arch_info)->enable = true;
	}

	if ((misc_config->local_memory_dma || misc_config->performance_monitor) &&
			(no_racr0 == false))
		((struct nds32_reg *)reg_cache->reg_list[RACR].arch_info)->enable = true;

	/* FUCPR controls coprocessor/audio unit enables */
	if (cpu_version->cop_fpu_extension || (misc_config->audio_isa != 0))
		((struct nds32_reg *)reg_cache->reg_list[FUCPR].arch_info)->enable = true;

	if (misc_config->audio_isa != 0) {
		/* 24-bit accumulators only on audio ISA level > 1 */
		if (misc_config->audio_isa > 1) {
			((struct nds32_reg *)reg_cache->reg_list[D0L24].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[D1L24].arch_info)->enable = true;
		}

		((struct nds32_reg *)reg_cache->reg_list[I0].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[I1].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[I2].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[I3].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[I4].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[I5].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[I6].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[I7].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[M1].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[M2].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[M3].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[M5].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[M6].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[M7].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[MOD].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[LBE].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[LE].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[LC].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[ADM_VBASE].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL0].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL1].arch_info)->enable = true;

		uint32_t value_mod;
		uint32_t fucpr_backup;
		/* enable fpu and get configuration
		 * NOTE(review): sets FUCPR bit 31 before reading MOD, then restores
		 * the original FUCPR — presumably MOD is only readable while that
		 * unit is enabled; confirm against the Andes ISA manual */
		nds32_get_mapped_reg(nds32, FUCPR, &fucpr_backup);
		if ((fucpr_backup & 0x80000000) == 0)
			nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup | 0x80000000);
		nds32_get_mapped_reg(nds32, MOD, &value_mod);
		/* restore origin fucpr value */
		if ((fucpr_backup & 0x80000000) == 0)
			nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup);

		/* MOD bit 6 gates the circular-buffer register set */
		if ((value_mod >> 6) & 0x1) {
			((struct nds32_reg *)reg_cache->reg_list[CB_CTL].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[CBB0].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[CBB1].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[CBB2].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[CBB3].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[CBE0].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[CBE1].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[CBE2].arch_info)->enable = true;
			((struct nds32_reg *)reg_cache->reg_list[CBE3].arch_info)->enable = true;
		}
	}

	if ((cpu_version->cpu_id_family == 0x9) ||
			(cpu_version->cpu_id_family == 0xA) ||
			(cpu_version->cpu_id_family == 0xC)) {

		((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IDR1].arch_info)->enable = true;

		/* family 0xC revision 0x0C lacks IDR0 — presumably an erratum */
		if ((cpu_version->cpu_id_family == 0xC) && (cpu_version->revision == 0x0C))
			((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = false;
	}

	/* probe $ir3 (IVB) for programmable priority level and IVIC version,
	 * which gate the newer interrupt priority/control registers */
	uint32_t ir3_value;
	uint32_t ivb_prog_pri_lvl;
	uint32_t ivb_ivic_ver;

	nds32_get_mapped_reg(nds32, IR3, &ir3_value);
	ivb_prog_pri_lvl = ir3_value & 0x1;
	ivb_ivic_ver = (ir3_value >> 11) & 0x3;

	if ((ivb_prog_pri_lvl == 1) || (ivb_ivic_ver >= 1)) {
		((struct nds32_reg *)reg_cache->reg_list[IR18].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR19].arch_info)->enable = true;
	}

	if (ivb_ivic_ver >= 1) {
		((struct nds32_reg *)reg_cache->reg_list[IR26].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR27].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR28].arch_info)->enable = true;
		((struct nds32_reg *)reg_cache->reg_list[IR29].arch_info)->enable = true;
	}

	return ERROR_OK;
}
1434
1435 int nds32_init_register_table(struct nds32 *nds32)
1436 {
1437 nds32_init_must_have_registers(nds32);
1438
1439 return ERROR_OK;
1440 }
1441
1442 int nds32_add_software_breakpoint(struct target *target,
1443 struct breakpoint *breakpoint)
1444 {
1445 uint32_t data;
1446 uint32_t check_data;
1447 uint32_t break_insn;
1448
1449 /* check the breakpoint size */
1450 target->type->read_buffer(target, breakpoint->address, 4, (uint8_t *)&data);
1451
1452 /* backup origin instruction
1453 * instruction is big-endian */
1454 if (*(char *)&data & 0x80) { /* 16-bits instruction */
1455 breakpoint->length = 2;
1456 break_insn = NDS32_BREAK_16;
1457 } else { /* 32-bits instruction */
1458 breakpoint->length = 4;
1459 break_insn = NDS32_BREAK_32;
1460 }
1461
1462 free(breakpoint->orig_instr);
1463
1464 breakpoint->orig_instr = malloc(breakpoint->length);
1465 memcpy(breakpoint->orig_instr, &data, breakpoint->length);
1466
1467 /* self-modified code */
1468 target->type->write_buffer(target, breakpoint->address, breakpoint->length, (const uint8_t *)&break_insn);
1469 /* write_back & invalidate dcache & invalidate icache */
1470 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1471
1472 /* read back to check */
1473 target->type->read_buffer(target, breakpoint->address, breakpoint->length, (uint8_t *)&check_data);
1474 if (memcmp(&check_data, &break_insn, breakpoint->length) == 0)
1475 return ERROR_OK;
1476
1477 return ERROR_FAIL;
1478 }
1479
1480 int nds32_remove_software_breakpoint(struct target *target,
1481 struct breakpoint *breakpoint)
1482 {
1483 uint32_t check_data;
1484 uint32_t break_insn;
1485
1486 if (breakpoint->length == 2)
1487 break_insn = NDS32_BREAK_16;
1488 else if (breakpoint->length == 4)
1489 break_insn = NDS32_BREAK_32;
1490 else
1491 return ERROR_FAIL;
1492
1493 target->type->read_buffer(target, breakpoint->address, breakpoint->length,
1494 (uint8_t *)&check_data);
1495
1496 /* break instruction is modified */
1497 if (memcmp(&check_data, &break_insn, breakpoint->length) != 0)
1498 return ERROR_FAIL;
1499
1500 /* self-modified code */
1501 target->type->write_buffer(target, breakpoint->address, breakpoint->length,
1502 breakpoint->orig_instr);
1503
1504 /* write_back & invalidate dcache & invalidate icache */
1505 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1506
1507 return ERROR_OK;
1508 }
1509
1510 /**
1511 * Restore the processor context on an Andes target. The full processor
1512 * context is analyzed to see if any of the registers are dirty on this end, but
1513 * have a valid new value. If this is the case, the processor is changed to the
1514 * appropriate mode and the new register values are written out to the
1515 * processor. If there happens to be a dirty register with an invalid value, an
1516 * error will be logged.
1517 *
1518 * @param target Pointer to the Andes target to have its context restored
1519 * @return Error status if the target is not halted.
1520 */
1521 int nds32_restore_context(struct target *target)
1522 {
1523 struct nds32 *nds32 = target_to_nds32(target);
1524 struct aice_port_s *aice = target_to_aice(target);
1525 struct reg_cache *reg_cache = nds32->core_cache;
1526 struct reg *reg;
1527 struct nds32_reg *reg_arch_info;
1528 unsigned int i;
1529
1530 LOG_DEBUG("-");
1531
1532 if (target->state != TARGET_HALTED) {
1533 LOG_WARNING("target not halted");
1534 return ERROR_TARGET_NOT_HALTED;
1535 }
1536
1537 /* check if there are dirty registers */
1538 for (i = 0; i < reg_cache->num_regs; i++) {
1539 reg = &(reg_cache->reg_list[i]);
1540 if (reg->dirty == true) {
1541 if (reg->valid == true) {
1542
1543 LOG_DEBUG("examining dirty reg: %s", reg->name);
1544 LOG_DEBUG("writing register %d with value 0x%8.8" PRIx32,
1545 i, buf_get_u32(reg->value, 0, 32));
1546
1547 reg_arch_info = reg->arch_info;
1548 if (FD0 <= reg_arch_info->num && reg_arch_info->num <= FD31) {
1549 uint64_t val = buf_get_u64(reg_arch_info->value, 0, 64);
1550 aice_write_reg_64(aice, reg_arch_info->num, val);
1551 } else {
1552 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
1553 aice_write_register(aice, reg_arch_info->num, val);
1554 }
1555
1556 reg->valid = true;
1557 reg->dirty = false;
1558 }
1559 }
1560 }
1561
1562 return ERROR_OK;
1563 }
1564
1565 int nds32_edm_config(struct nds32 *nds32)
1566 {
1567 struct target *target = nds32->target;
1568 struct aice_port_s *aice = target_to_aice(target);
1569 uint32_t edm_cfg;
1570 uint32_t edm_ctl;
1571
1572 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CFG, &edm_cfg);
1573
1574 nds32->edm.version = (edm_cfg >> 16) & 0xFFFF;
1575 LOG_INFO("EDM version 0x%04x", nds32->edm.version);
1576
1577 nds32->edm.breakpoint_num = (edm_cfg & 0x7) + 1;
1578
1579 if ((nds32->edm.version & 0x1000) || (0x60 <= nds32->edm.version))
1580 nds32->edm.access_control = true;
1581 else
1582 nds32->edm.access_control = false;
1583
1584 if ((edm_cfg >> 4) & 0x1)
1585 nds32->edm.direct_access_local_memory = true;
1586 else
1587 nds32->edm.direct_access_local_memory = false;
1588
1589 if (nds32->edm.version <= 0x20)
1590 nds32->edm.direct_access_local_memory = false;
1591
1592 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &edm_ctl);
1593 if (edm_ctl & (0x1 << 29))
1594 nds32->edm.support_max_stop = true;
1595 else
1596 nds32->edm.support_max_stop = false;
1597
1598 /* set passcode for secure MCU */
1599 nds32_login(nds32);
1600
1601 return ERROR_OK;
1602 }
1603
1604 int nds32_config(struct nds32 *nds32)
1605 {
1606 nds32_init_config(nds32);
1607
1608 /* init optional system registers according to config registers */
1609 nds32_init_option_registers(nds32);
1610
1611 /* get max interrupt level */
1612 if (nds32->misc_config.interruption_level)
1613 nds32->max_interrupt_level = 2;
1614 else
1615 nds32->max_interrupt_level = 3;
1616
1617 /* get ILM/DLM size from MR6/MR7 */
1618 uint32_t value_mr6, value_mr7;
1619 uint32_t size_index;
1620 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
1621 size_index = (value_mr6 >> 1) & 0xF;
1622 nds32->memory.ilm_size = nds32_lm_size_table[size_index];
1623
1624 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
1625 size_index = (value_mr7 >> 1) & 0xF;
1626 nds32->memory.dlm_size = nds32_lm_size_table[size_index];
1627
1628 return ERROR_OK;
1629 }
1630
1631 int nds32_init_arch_info(struct target *target, struct nds32 *nds32)
1632 {
1633 target->arch_info = nds32;
1634 nds32->target = target;
1635
1636 nds32->common_magic = NDS32_COMMON_MAGIC;
1637 nds32->init_arch_info_after_halted = false;
1638 nds32->auto_convert_hw_bp = true;
1639 nds32->global_stop = false;
1640 nds32->soft_reset_halt = false;
1641 nds32->edm_passcode = NULL;
1642 nds32->privilege_level = 0;
1643 nds32->boot_time = 1500;
1644 nds32->reset_halt_as_examine = false;
1645 nds32->keep_target_edm_ctl = false;
1646 nds32->word_access_mem = false;
1647 nds32->virtual_hosting = true;
1648 nds32->hit_syscall = false;
1649 nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;
1650 nds32->virtual_hosting_errno = 0;
1651 nds32->virtual_hosting_ctrl_c = false;
1652 nds32->attached = false;
1653
1654 nds32->syscall_break.asid = 0;
1655 nds32->syscall_break.length = 4;
1656 nds32->syscall_break.set = 0;
1657 nds32->syscall_break.orig_instr = NULL;
1658 nds32->syscall_break.next = NULL;
1659 nds32->syscall_break.unique_id = 0x515CAll + target->target_number;
1660 nds32->syscall_break.linked_brp = 0;
1661
1662 nds32_reg_init();
1663
1664 if (ERROR_FAIL == nds32_reg_cache_init(target, nds32))
1665 return ERROR_FAIL;
1666
1667 if (ERROR_OK != nds32_init_register_table(nds32))
1668 return ERROR_FAIL;
1669
1670 return ERROR_OK;
1671 }
1672
1673 int nds32_virtual_to_physical(struct target *target, target_addr_t address, target_addr_t *physical)
1674 {
1675 struct nds32 *nds32 = target_to_nds32(target);
1676
1677 if (nds32->memory.address_translation == false) {
1678 *physical = address;
1679 return ERROR_OK;
1680 }
1681
1682 if (ERROR_OK == nds32_probe_tlb(nds32, address, physical))
1683 return ERROR_OK;
1684
1685 if (ERROR_OK == nds32_walk_page_table(nds32, address, physical))
1686 return ERROR_OK;
1687
1688 return ERROR_FAIL;
1689 }
1690
1691 int nds32_cache_sync(struct target *target, target_addr_t address, uint32_t length)
1692 {
1693 struct aice_port_s *aice = target_to_aice(target);
1694 struct nds32 *nds32 = target_to_nds32(target);
1695 struct nds32_cache *dcache = &(nds32->memory.dcache);
1696 struct nds32_cache *icache = &(nds32->memory.icache);
1697 uint32_t dcache_line_size = nds32_line_size_table[dcache->line_size];
1698 uint32_t icache_line_size = nds32_line_size_table[icache->line_size];
1699 uint32_t cur_address;
1700 int result;
1701 uint32_t start_line, end_line;
1702 uint32_t cur_line;
1703
1704 if ((dcache->line_size != 0) && (dcache->enable == true)) {
1705 /* address / dcache_line_size */
1706 start_line = address >> (dcache->line_size + 2);
1707 /* (address + length - 1) / dcache_line_size */
1708 end_line = (address + length - 1) >> (dcache->line_size + 2);
1709
1710 for (cur_address = address, cur_line = start_line;
1711 cur_line <= end_line;
1712 cur_address += dcache_line_size, cur_line++) {
1713 /* D$ write back */
1714 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_WB, cur_address);
1715 if (result != ERROR_OK)
1716 return result;
1717
1718 /* D$ invalidate */
1719 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_INVAL, cur_address);
1720 if (result != ERROR_OK)
1721 return result;
1722 }
1723 }
1724
1725 if ((icache->line_size != 0) && (icache->enable == true)) {
1726 /* address / icache_line_size */
1727 start_line = address >> (icache->line_size + 2);
1728 /* (address + length - 1) / icache_line_size */
1729 end_line = (address + length - 1) >> (icache->line_size + 2);
1730
1731 for (cur_address = address, cur_line = start_line;
1732 cur_line <= end_line;
1733 cur_address += icache_line_size, cur_line++) {
1734 /* Because PSW.IT is turned off under debug exception, address MUST
1735 * be physical address. L1I_VA_INVALIDATE uses PSW.IT to decide
1736 * address translation or not. */
1737 target_addr_t physical_addr;
1738 if (ERROR_FAIL == target->type->virt2phys(target, cur_address,
1739 &physical_addr))
1740 return ERROR_FAIL;
1741
1742 /* I$ invalidate */
1743 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1I_VA_INVAL, physical_addr);
1744 if (result != ERROR_OK)
1745 return result;
1746 }
1747 }
1748
1749 return ERROR_OK;
1750 }
1751
1752 uint32_t nds32_nextpc(struct nds32 *nds32, int current, uint32_t address)
1753 {
1754 if (!current)
1755 nds32_set_mapped_reg(nds32, PC, address);
1756 else
1757 nds32_get_mapped_reg(nds32, PC, &address);
1758
1759 return address;
1760 }
1761
1762 int nds32_step(struct target *target, int current,
1763 target_addr_t address, int handle_breakpoints)
1764 {
1765 LOG_DEBUG("target->state: %s",
1766 target_state_name(target));
1767
1768 if (target->state != TARGET_HALTED) {
1769 LOG_WARNING("target was not halted");
1770 return ERROR_TARGET_NOT_HALTED;
1771 }
1772
1773 struct nds32 *nds32 = target_to_nds32(target);
1774
1775 address = nds32_nextpc(nds32, current, address);
1776
1777 LOG_DEBUG("STEP PC %08" TARGET_PRIxADDR "%s", address, !current ? "!" : "");
1778
1779 /** set DSSIM */
1780 uint32_t ir14_value;
1781 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1782 if (nds32->step_isr_enable)
1783 ir14_value |= (0x1 << 31);
1784 else
1785 ir14_value &= ~(0x1 << 31);
1786 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1787
1788 /* check hit_syscall before leave_debug_state() because
1789 * leave_debug_state() may clear hit_syscall flag */
1790 bool no_step = false;
1791 if (nds32->hit_syscall)
1792 /* step after hit_syscall should be ignored because
1793 * leave_debug_state will step implicitly to skip the
1794 * syscall */
1795 no_step = true;
1796
1797 /********* TODO: maybe create another function to handle this part */
1798 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
1799 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
1800
1801 if (no_step == false) {
1802 struct aice_port_s *aice = target_to_aice(target);
1803 if (ERROR_OK != aice_step(aice))
1804 return ERROR_FAIL;
1805 }
1806
1807 /* save state */
1808 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
1809 /********* TODO: maybe create another function to handle this part */
1810
1811 /* restore DSSIM */
1812 if (nds32->step_isr_enable) {
1813 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1814 ir14_value &= ~(0x1 << 31);
1815 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1816 }
1817
1818 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
1819
1820 return ERROR_OK;
1821 }
1822
1823 static int nds32_step_without_watchpoint(struct nds32 *nds32)
1824 {
1825 struct target *target = nds32->target;
1826
1827 if (target->state != TARGET_HALTED) {
1828 LOG_WARNING("target was not halted");
1829 return ERROR_TARGET_NOT_HALTED;
1830 }
1831
1832 /** set DSSIM */
1833 uint32_t ir14_value;
1834 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1835 if (nds32->step_isr_enable)
1836 ir14_value |= (0x1 << 31);
1837 else
1838 ir14_value &= ~(0x1 << 31);
1839 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1840
1841 /********* TODO: maybe create another function to handle this part */
1842 CHECK_RETVAL(nds32->leave_debug_state(nds32, false));
1843
1844 struct aice_port_s *aice = target_to_aice(target);
1845
1846 if (ERROR_OK != aice_step(aice))
1847 return ERROR_FAIL;
1848
1849 /* save state */
1850 CHECK_RETVAL(nds32->enter_debug_state(nds32, false));
1851 /********* TODO: maybe create another function to handle this part */
1852
1853 /* restore DSSIM */
1854 if (nds32->step_isr_enable) {
1855 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1856 ir14_value &= ~(0x1 << 31);
1857 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1858 }
1859
1860 return ERROR_OK;
1861 }
1862
1863 int nds32_target_state(struct nds32 *nds32, enum target_state *state)
1864 {
1865 struct aice_port_s *aice = target_to_aice(nds32->target);
1866 enum aice_target_state_s nds32_state;
1867
1868 if (aice_state(aice, &nds32_state) != ERROR_OK)
1869 return ERROR_FAIL;
1870
1871 switch (nds32_state) {
1872 case AICE_DISCONNECT:
1873 LOG_INFO("USB is disconnected");
1874 return ERROR_FAIL;
1875 case AICE_TARGET_DETACH:
1876 LOG_INFO("Target is disconnected");
1877 return ERROR_FAIL;
1878 case AICE_TARGET_UNKNOWN:
1879 *state = TARGET_UNKNOWN;
1880 break;
1881 case AICE_TARGET_RUNNING:
1882 *state = TARGET_RUNNING;
1883 break;
1884 case AICE_TARGET_HALTED:
1885 *state = TARGET_HALTED;
1886 break;
1887 case AICE_TARGET_RESET:
1888 *state = TARGET_RESET;
1889 break;
1890 case AICE_TARGET_DEBUG_RUNNING:
1891 *state = TARGET_DEBUG_RUNNING;
1892 break;
1893 default:
1894 return ERROR_FAIL;
1895 }
1896
1897 return ERROR_OK;
1898 }
1899
1900 int nds32_examine_debug_reason(struct nds32 *nds32)
1901 {
1902 uint32_t reason;
1903 struct target *target = nds32->target;
1904
1905 if (nds32->hit_syscall == true) {
1906 LOG_DEBUG("Hit syscall breakpoint");
1907 target->debug_reason = DBG_REASON_BREAKPOINT;
1908 return ERROR_OK;
1909 }
1910
1911 nds32->get_debug_reason(nds32, &reason);
1912
1913 LOG_DEBUG("nds32 examines debug reason: %s", nds32_debug_type_name[reason]);
1914
1915 /* Examine debug reason */
1916 switch (reason) {
1917 case NDS32_DEBUG_BREAK:
1918 case NDS32_DEBUG_BREAK_16:
1919 case NDS32_DEBUG_INST_BREAK:
1920 {
1921 uint32_t value_pc;
1922 uint32_t opcode;
1923 struct nds32_instruction instruction;
1924
1925 nds32_get_mapped_reg(nds32, PC, &value_pc);
1926
1927 if (ERROR_OK != nds32_read_opcode(nds32, value_pc, &opcode))
1928 return ERROR_FAIL;
1929 if (ERROR_OK != nds32_evaluate_opcode(nds32, opcode, value_pc,
1930 &instruction))
1931 return ERROR_FAIL;
1932
1933 /* hit 'break 0x7FFF' */
1934 if ((instruction.info.opc_6 == 0x32) &&
1935 (instruction.info.sub_opc == 0xA) &&
1936 (instruction.info.imm == 0x7FFF)) {
1937 target->debug_reason = DBG_REASON_EXIT;
1938 } else
1939 target->debug_reason = DBG_REASON_BREAKPOINT;
1940 }
1941 break;
1942 case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_PRECISE:
1943 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_PRECISE:
1944 case NDS32_DEBUG_LOAD_STORE_GLOBAL_STOP: /* GLOBAL_STOP is precise exception */
1945 {
1946 int result;
1947
1948 result = nds32->get_watched_address(nds32,
1949 &(nds32->watched_address), reason);
1950 /* do single step(without watchpoints) to skip the "watched" instruction */
1951 nds32_step_without_watchpoint(nds32);
1952
1953 /* before single_step, save exception address */
1954 if (result != ERROR_OK)
1955 return ERROR_FAIL;
1956
1957 target->debug_reason = DBG_REASON_WATCHPOINT;
1958 }
1959 break;
1960 case NDS32_DEBUG_DEBUG_INTERRUPT:
1961 target->debug_reason = DBG_REASON_DBGRQ;
1962 break;
1963 case NDS32_DEBUG_HARDWARE_SINGLE_STEP:
1964 target->debug_reason = DBG_REASON_SINGLESTEP;
1965 break;
1966 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_IMPRECISE:
1967 case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_NEXT_PRECISE:
1968 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_NEXT_PRECISE:
1969 if (ERROR_OK != nds32->get_watched_address(nds32,
1970 &(nds32->watched_address), reason))
1971 return ERROR_FAIL;
1972
1973 target->debug_reason = DBG_REASON_WATCHPOINT;
1974 break;
1975 default:
1976 target->debug_reason = DBG_REASON_UNDEFINED;
1977 break;
1978 }
1979
1980 return ERROR_OK;
1981 }
1982
1983 int nds32_login(struct nds32 *nds32)
1984 {
1985 struct target *target = nds32->target;
1986 struct aice_port_s *aice = target_to_aice(target);
1987 uint32_t passcode_length;
1988 char command_sequence[129];
1989 char command_str[33];
1990 char code_str[9];
1991 uint32_t copy_length;
1992 uint32_t code;
1993 uint32_t i;
1994
1995 LOG_DEBUG("nds32_login");
1996
1997 if (nds32->edm_passcode != NULL) {
1998 /* convert EDM passcode to command sequences */
1999 passcode_length = strlen(nds32->edm_passcode);
2000 command_sequence[0] = '\0';
2001 for (i = 0; i < passcode_length; i += 8) {
2002 if (passcode_length - i < 8)
2003 copy_length = passcode_length - i;
2004 else
2005 copy_length = 8;
2006
2007 strncpy(code_str, nds32->edm_passcode + i, copy_length);
2008 code_str[copy_length] = '\0';
2009 code = strtoul(code_str, NULL, 16);
2010
2011 sprintf(command_str, "write_misc gen_port0 0x%" PRIx32 ";", code);
2012 strcat(command_sequence, command_str);
2013 }
2014
2015 if (ERROR_OK != aice_program_edm(aice, command_sequence))
2016 return ERROR_FAIL;
2017
2018 /* get current privilege level */
2019 uint32_t value_edmsw;
2020 aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &value_edmsw);
2021 nds32->privilege_level = (value_edmsw >> 16) & 0x3;
2022 LOG_INFO("Current privilege level: %d", nds32->privilege_level);
2023 }
2024
2025 if (nds32_edm_ops_num > 0) {
2026 const char *reg_name;
2027 for (i = 0 ; i < nds32_edm_ops_num ; i++) {
2028 code = nds32_edm_ops[i].value;
2029 if (nds32_edm_ops[i].reg_no == 6)
2030 reg_name = "gen_port0";
2031 else if (nds32_edm_ops[i].reg_no == 7)
2032 reg_name = "gen_port1";
2033 else
2034 return ERROR_FAIL;
2035
2036 sprintf(command_str, "write_misc %s 0x%" PRIx32 ";", reg_name, code);
2037 if (ERROR_OK != aice_program_edm(aice, command_str))
2038 return ERROR_FAIL;
2039 }
2040 }
2041
2042 return ERROR_OK;
2043 }
2044
2045 int nds32_halt(struct target *target)
2046 {
2047 struct nds32 *nds32 = target_to_nds32(target);
2048 struct aice_port_s *aice = target_to_aice(target);
2049 enum target_state state;
2050
2051 LOG_DEBUG("target->state: %s",
2052 target_state_name(target));
2053
2054 if (target->state == TARGET_HALTED) {
2055 LOG_DEBUG("target was already halted");
2056 return ERROR_OK;
2057 }
2058
2059 if (nds32_target_state(nds32, &state) != ERROR_OK)
2060 return ERROR_FAIL;
2061
2062 if (state != TARGET_HALTED)
2063 /* TODO: if state == TARGET_HALTED, check ETYPE is DBGI or not */
2064 if (ERROR_OK != aice_halt(aice))
2065 return ERROR_FAIL;
2066
2067 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
2068
2069 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
2070
2071 return ERROR_OK;
2072 }
2073
2074 /* poll current target status */
2075 int nds32_poll(struct target *target)
2076 {
2077 struct nds32 *nds32 = target_to_nds32(target);
2078 enum target_state state;
2079
2080 if (nds32_target_state(nds32, &state) != ERROR_OK)
2081 return ERROR_FAIL;
2082
2083 if (state == TARGET_HALTED) {
2084 if (target->state != TARGET_HALTED) {
2085 /* if false_hit, continue free_run */
2086 if (ERROR_OK != nds32->enter_debug_state(nds32, true)) {
2087 struct aice_port_s *aice = target_to_aice(target);
2088 aice_run(aice);
2089 return ERROR_OK;
2090 }
2091
2092 LOG_DEBUG("Change target state to TARGET_HALTED.");
2093
2094 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
2095 }
2096 } else if (state == TARGET_RESET) {
2097 if (target->state == TARGET_HALTED) {
2098 /* similar to assert srst */
2099 register_cache_invalidate(nds32->core_cache);
2100 target->state = TARGET_RESET;
2101
2102 /* TODO: deassert srst */
2103 } else if (target->state == TARGET_RUNNING) {
2104 /* reset as running */
2105 LOG_WARNING("<-- TARGET WARNING! The debug target has been reset. -->");
2106 }
2107 } else {
2108 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
2109 LOG_DEBUG("Change target state to TARGET_RUNNING.");
2110 target->state = TARGET_RUNNING;
2111 target->debug_reason = DBG_REASON_NOTHALTED;
2112 }
2113 }
2114
2115 return ERROR_OK;
2116 }
2117
2118 int nds32_resume(struct target *target, int current,
2119 target_addr_t address, int handle_breakpoints, int debug_execution)
2120 {
2121 LOG_DEBUG("current %d address %08" TARGET_PRIxADDR
2122 " handle_breakpoints %d"
2123 " debug_execution %d",
2124 current, address, handle_breakpoints, debug_execution);
2125
2126 struct nds32 *nds32 = target_to_nds32(target);
2127
2128 if (target->state != TARGET_HALTED) {
2129 LOG_ERROR("Target not halted");
2130 return ERROR_TARGET_NOT_HALTED;
2131 }
2132
2133 address = nds32_nextpc(nds32, current, address);
2134
2135 LOG_DEBUG("RESUME PC %08" TARGET_PRIxADDR "%s", address, !current ? "!" : "");
2136
2137 if (!debug_execution)
2138 target_free_all_working_areas(target);
2139
2140 /* Disable HSS to avoid users misuse HSS */
2141 if (nds32_reach_max_interrupt_level(nds32) == false) {
2142 uint32_t value_ir0;
2143 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
2144 value_ir0 &= ~(0x1 << 11);
2145 nds32_set_mapped_reg(nds32, IR0, value_ir0);
2146 }
2147
2148 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
2149 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
2150
2151 if (nds32->virtual_hosting_ctrl_c == false) {
2152 struct aice_port_s *aice = target_to_aice(target);
2153 aice_run(aice);
2154 } else
2155 nds32->virtual_hosting_ctrl_c = false;
2156
2157 target->debug_reason = DBG_REASON_NOTHALTED;
2158 if (!debug_execution)
2159 target->state = TARGET_RUNNING;
2160 else
2161 target->state = TARGET_DEBUG_RUNNING;
2162
2163 LOG_DEBUG("target->state: %s",
2164 target_state_name(target));
2165
2166 return ERROR_OK;
2167 }
2168
2169 static int nds32_soft_reset_halt(struct target *target)
2170 {
2171 /* TODO: test it */
2172 struct nds32 *nds32 = target_to_nds32(target);
2173 struct aice_port_s *aice = target_to_aice(target);
2174
2175 aice_assert_srst(aice, AICE_SRST);
2176
2177 /* halt core and set pc to 0x0 */
2178 int retval = target_halt(target);
2179 if (retval != ERROR_OK)
2180 return retval;
2181
2182 /* start fetching from IVB */
2183 uint32_t value_ir3;
2184 nds32_get_mapped_reg(nds32, IR3, &value_ir3);
2185 nds32_set_mapped_reg(nds32, PC, value_ir3 & 0xFFFF0000);
2186
2187 return ERROR_OK;
2188 }
2189
2190 int nds32_assert_reset(struct target *target)
2191 {
2192 struct nds32 *nds32 = target_to_nds32(target);
2193 struct aice_port_s *aice = target_to_aice(target);
2194 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
2195
2196 /* TODO: apply hw reset signal in not examined state */
2197 if (!(target_was_examined(target))) {
2198 LOG_WARNING("Reset is not asserted because the target is not examined.");
2199 LOG_WARNING("Use a reset button or power cycle the target.");
2200 return ERROR_TARGET_NOT_EXAMINED;
2201 }
2202
2203 if (target->reset_halt) {
2204 if ((nds32->soft_reset_halt)
2205 || (nds32->edm.version < 0x51)
2206 || ((nds32->edm.version == 0x51)
2207 && (cpu_version->revision == 0x1C)
2208 && (cpu_version->cpu_id_family == 0xC)
2209 && (cpu_version->cpu_id_version == 0x0)))
2210 nds32_soft_reset_halt(target);
2211 else
2212 aice_assert_srst(aice, AICE_RESET_HOLD);
2213 } else {
2214 aice_assert_srst(aice, AICE_SRST);
2215 alive_sleep(nds32->boot_time);
2216 }
2217
2218 /* set passcode for secure MCU after core reset */
2219 nds32_login(nds32);
2220
2221 /* registers are now invalid */
2222 register_cache_invalidate(nds32->core_cache);
2223
2224 target->state = TARGET_RESET;
2225
2226 return ERROR_OK;
2227 }
2228
2229 static int nds32_gdb_attach(struct nds32 *nds32)
2230 {
2231 LOG_DEBUG("nds32_gdb_attach, target coreid: %" PRId32, nds32->target->coreid);
2232
2233 if (nds32->attached == false) {
2234
2235 if (nds32->keep_target_edm_ctl) {
2236 /* backup target EDM_CTL */
2237 struct aice_port_s *aice = target_to_aice(nds32->target);
2238 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &nds32->backup_edm_ctl);
2239 }
2240
2241 target_halt(nds32->target);
2242
2243 nds32->attached = true;
2244 }
2245
2246 return ERROR_OK;
2247 }
2248
2249 static int nds32_gdb_detach(struct nds32 *nds32)
2250 {
2251 LOG_DEBUG("nds32_gdb_detach");
2252 bool backup_virtual_hosting_setting;
2253
2254 if (nds32->attached) {
2255
2256 backup_virtual_hosting_setting = nds32->virtual_hosting;
2257 /* turn off virtual hosting before resume as gdb-detach */
2258 nds32->virtual_hosting = false;
2259 target_resume(nds32->target, 1, 0, 0, 0);
2260 nds32->virtual_hosting = backup_virtual_hosting_setting;
2261
2262 if (nds32->keep_target_edm_ctl) {
2263 /* restore target EDM_CTL */
2264 struct aice_port_s *aice = target_to_aice(nds32->target);
2265 aice_write_debug_reg(aice, NDS_EDM_SR_EDM_CTL, nds32->backup_edm_ctl);
2266 }
2267
2268 nds32->attached = false;
2269 }
2270
2271 return ERROR_OK;
2272 }
2273
2274 static int nds32_callback_event_handler(struct target *target,
2275 enum target_event event, void *priv)
2276 {
2277 int retval = ERROR_OK;
2278 int target_number = *(int *)priv;
2279
2280 if (target_number != target->target_number)
2281 return ERROR_OK;
2282
2283 struct nds32 *nds32 = target_to_nds32(target);
2284
2285 switch (event) {
2286 case TARGET_EVENT_GDB_ATTACH:
2287 retval = nds32_gdb_attach(nds32);
2288 break;
2289 case TARGET_EVENT_GDB_DETACH:
2290 retval = nds32_gdb_detach(nds32);
2291 break;
2292 default:
2293 break;
2294 }
2295
2296 return retval;
2297 }
2298
/**
 * Target-independent initialization of a nds32 core.
 *
 * Sets the default memory access channel to CPU mode and registers the
 * event callback that handles gdb attach/detach for this target.
 * Requires no communication with the hardware.
 */
int nds32_init(struct nds32 *nds32)
{
	/* Initialize anything we can set up without talking to the target */
	nds32->memory.access_channel = NDS_MEMORY_ACC_CPU;

	/* register event callback */
	target_register_event_callback(nds32_callback_event_handler,
			&(nds32->target->target_number));

	return ERROR_OK;
}
2310
2311 int nds32_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
2312 {
2313 /* fill syscall parameters to file-I/O info */
2314 if (NULL == fileio_info) {
2315 LOG_ERROR("Target has not initial file-I/O data structure");
2316 return ERROR_FAIL;
2317 }
2318
2319 struct nds32 *nds32 = target_to_nds32(target);
2320 uint32_t value_ir6;
2321 uint32_t syscall_id;
2322
2323 if (nds32->hit_syscall == false)
2324 return ERROR_FAIL;
2325
2326 nds32_get_mapped_reg(nds32, IR6, &value_ir6);
2327 syscall_id = (value_ir6 >> 16) & 0x7FFF;
2328 nds32->active_syscall_id = syscall_id;
2329
2330 LOG_DEBUG("hit syscall ID: 0x%" PRIx32, syscall_id);
2331
2332 /* free previous identifier storage */
2333 free(fileio_info->identifier);
2334 fileio_info->identifier = NULL;
2335
2336 uint32_t reg_r0, reg_r1, reg_r2;
2337 nds32_get_mapped_reg(nds32, R0, &reg_r0);
2338 nds32_get_mapped_reg(nds32, R1, &reg_r1);
2339 nds32_get_mapped_reg(nds32, R2, &reg_r2);
2340
2341 switch (syscall_id) {
2342 case NDS32_SYSCALL_EXIT:
2343 fileio_info->identifier = malloc(5);
2344 sprintf(fileio_info->identifier, "exit");
2345 fileio_info->param_1 = reg_r0;
2346 break;
2347 case NDS32_SYSCALL_OPEN:
2348 {
2349 uint8_t filename[256];
2350 fileio_info->identifier = malloc(5);
2351 sprintf(fileio_info->identifier, "open");
2352 fileio_info->param_1 = reg_r0;
2353 /* reserve fileio_info->param_2 for length of path */
2354 fileio_info->param_3 = reg_r1;
2355 fileio_info->param_4 = reg_r2;
2356
2357 target->type->read_buffer(target, reg_r0, 256, filename);
2358 fileio_info->param_2 = strlen((char *)filename);
2359 }
2360 break;
2361 case NDS32_SYSCALL_CLOSE:
2362 fileio_info->identifier = malloc(6);
2363 sprintf(fileio_info->identifier, "close");
2364 fileio_info->param_1 = reg_r0;
2365 break;
2366 case NDS32_SYSCALL_READ:
2367 fileio_info->identifier = malloc(5);
2368 sprintf(fileio_info->identifier, "read");
2369 fileio_info->param_1 = reg_r0;
2370 fileio_info->param_2 = reg_r1;
2371 fileio_info->param_3 = reg_r2;
2372 break;
2373 case NDS32_SYSCALL_WRITE:
2374 fileio_info->identifier = malloc(6);
2375 sprintf(fileio_info->identifier, "write");
2376 fileio_info->param_1 = reg_r0;
2377 fileio_info->param_2 = reg_r1;
2378 fileio_info->param_3 = reg_r2;
2379 break;
2380 case NDS32_SYSCALL_LSEEK:
2381 fileio_info->identifier = malloc(6);
2382 sprintf(fileio_info->identifier, "lseek");
2383 fileio_info->param_1 = reg_r0;
2384 fileio_info->param_2 = reg_r1;
2385 fileio_info->param_3 = reg_r2;
2386 break;
2387 case NDS32_SYSCALL_UNLINK:
2388 {
2389 uint8_t filename[256];
2390 fileio_info->identifier = malloc(7);
2391 sprintf(fileio_info->identifier, "unlink");
2392 fileio_info->param_1 = reg_r0;
2393 /* reserve fileio_info->param_2 for length of path */
2394
2395 target->type->read_buffer(target, reg_r0, 256, filename);
2396 fileio_info->param_2 = strlen((char *)filename);
2397 }
2398 break;
2399 case NDS32_SYSCALL_RENAME:
2400 {
2401 uint8_t filename[256];
2402 fileio_info->identifier = malloc(7);
2403 sprintf(fileio_info->identifier, "rename");
2404 fileio_info->param_1 = reg_r0;
2405 /* reserve fileio_info->param_2 for length of old path */
2406 fileio_info->param_3 = reg_r1;
2407 /* reserve fileio_info->param_4 for length of new path */
2408
2409 target->type->read_buffer(target, reg_r0, 256, filename);
2410 fileio_info->param_2 = strlen((char *)filename);
2411
2412 target->type->read_buffer(target, reg_r1, 256, filename);
2413 fileio_info->param_4 = strlen((char *)filename);
2414 }
2415 break;
2416 case NDS32_SYSCALL_FSTAT:
2417 fileio_info->identifier = malloc(6);
2418 sprintf(fileio_info->identifier, "fstat");
2419 fileio_info->param_1 = reg_r0;
2420 fileio_info->param_2 = reg_r1;
2421 break;
2422 case NDS32_SYSCALL_STAT:
2423 {
2424 uint8_t filename[256];
2425 fileio_info->identifier = malloc(5);
2426 sprintf(fileio_info->identifier, "stat");
2427 fileio_info->param_1 = reg_r0;
2428 /* reserve fileio_info->param_2 for length of old path */
2429 fileio_info->param_3 = reg_r1;
2430
2431 target->type->read_buffer(target, reg_r0, 256, filename);
2432 fileio_info->param_2 = strlen((char *)filename) + 1;
2433 }
2434 break;
2435 case NDS32_SYSCALL_GETTIMEOFDAY:
2436 fileio_info->identifier = malloc(13);
2437 sprintf(fileio_info->identifier, "gettimeofday");
2438 fileio_info->param_1 = reg_r0;
2439 fileio_info->param_2 = reg_r1;
2440 break;
2441 case NDS32_SYSCALL_ISATTY:
2442 fileio_info->identifier = malloc(7);
2443 sprintf(fileio_info->identifier, "isatty");
2444 fileio_info->param_1 = reg_r0;
2445 break;
2446 case NDS32_SYSCALL_SYSTEM:
2447 {
2448 uint8_t command[256];
2449 fileio_info->identifier = malloc(7);
2450 sprintf(fileio_info->identifier, "system");
2451 fileio_info->param_1 = reg_r0;
2452 /* reserve fileio_info->param_2 for length of old path */
2453
2454 target->type->read_buffer(target, reg_r0, 256, command);
2455 fileio_info->param_2 = strlen((char *)command);
2456 }
2457 break;
2458 case NDS32_SYSCALL_ERRNO:
2459 fileio_info->identifier = malloc(6);
2460 sprintf(fileio_info->identifier, "errno");
2461 nds32_set_mapped_reg(nds32, R0, nds32->virtual_hosting_errno);
2462 break;
2463 default:
2464 fileio_info->identifier = malloc(8);
2465 sprintf(fileio_info->identifier, "unknown");
2466 break;
2467 }
2468
2469 return ERROR_OK;
2470 }
2471
/**
 * Complete a gdb file-I/O syscall.
 *
 * Delivers the host-side result back to the program: the return code goes
 * into R0, the host errno is cached for a later NDS32_SYSCALL_ERRNO query,
 * and a Ctrl-C from gdb is latched so the next resume stays halted.
 * Clears the active syscall ID.
 *
 * @param target       target whose syscall finished
 * @param retcode      syscall return value for the program
 * @param fileio_errno host errno to report on request
 * @param ctrl_c       true if gdb interrupted the syscall
 */
int nds32_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
{
	LOG_DEBUG("syscall return code: 0x%x, errno: 0x%x , ctrl_c: %s",
			retcode, fileio_errno, ctrl_c ? "true" : "false");

	struct nds32 *nds32 = target_to_nds32(target);

	nds32_set_mapped_reg(nds32, R0, (uint32_t)retcode);

	nds32->virtual_hosting_errno = fileio_errno;
	nds32->virtual_hosting_ctrl_c = ctrl_c;
	nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;

	return ERROR_OK;
}
2487
2488 int nds32_profiling(struct target *target, uint32_t *samples,
2489 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2490 {
2491 /* sample $PC every 10 milliseconds */
2492 uint32_t iteration = seconds * 100;
2493 struct aice_port_s *aice = target_to_aice(target);
2494 struct nds32 *nds32 = target_to_nds32(target);
2495
2496 /* REVISIT: can nds32 profile without halting? */
2497 if (target->state != TARGET_HALTED) {
2498 LOG_WARNING("target %s is not halted (profiling)", target->cmd_name);
2499 return ERROR_TARGET_NOT_HALTED;
2500 }
2501
2502 if (max_num_samples < iteration)
2503 iteration = max_num_samples;
2504
2505 int pc_regnum = nds32->register_map(nds32, PC);
2506 aice_profiling(aice, 10, iteration, pc_regnum, samples, num_samples);
2507
2508 register_cache_invalidate(nds32->core_cache);
2509
2510 return ERROR_OK;
2511 }
2512
2513 int nds32_gdb_fileio_write_memory(struct nds32 *nds32, uint32_t address,
2514 uint32_t size, const uint8_t *buffer)
2515 {
2516 if ((NDS32_SYSCALL_FSTAT == nds32->active_syscall_id) ||
2517 (NDS32_SYSCALL_STAT == nds32->active_syscall_id)) {
2518 /* If doing GDB file-I/O, target should convert 'struct stat'
2519 * from gdb-format to target-format */
2520 uint8_t stat_buffer[NDS32_STRUCT_STAT_SIZE];
2521 /* st_dev 2 */
2522 stat_buffer[0] = buffer[3];
2523 stat_buffer[1] = buffer[2];
2524 /* st_ino 2 */
2525 stat_buffer[2] = buffer[7];
2526 stat_buffer[3] = buffer[6];
2527 /* st_mode 4 */
2528 stat_buffer[4] = buffer[11];
2529 stat_buffer[5] = buffer[10];
2530 stat_buffer[6] = buffer[9];
2531 stat_buffer[7] = buffer[8];
2532 /* st_nlink 2 */
2533 stat_buffer[8] = buffer[15];
2534 stat_buffer[9] = buffer[16];
2535 /* st_uid 2 */
2536 stat_buffer[10] = buffer[19];
2537 stat_buffer[11] = buffer[18];
2538 /* st_gid 2 */
2539 stat_buffer[12] = buffer[23];
2540 stat_buffer[13] = buffer[22];
2541 /* st_rdev 2 */
2542 stat_buffer[14] = buffer[27];
2543 stat_buffer[15] = buffer[26];
2544 /* st_size 4 */
2545 stat_buffer[16] = buffer[35];
2546 stat_buffer[17] = buffer[34];
2547 stat_buffer[18] = buffer[33];
2548 stat_buffer[19] = buffer[32];
2549 /* st_atime 4 */
2550 stat_buffer[20] = buffer[55];
2551 stat_buffer[21] = buffer[54];
2552 stat_buffer[22] = buffer[53];
2553 stat_buffer[23] = buffer[52];
2554 /* st_spare1 4 */
2555 stat_buffer[24] = 0;
2556 stat_buffer[25] = 0;
2557 stat_buffer[26] = 0;
2558 stat_buffer[27] = 0;
2559 /* st_mtime 4 */
2560 stat_buffer[28] = buffer[59];
2561 stat_buffer[29] = buffer[58];
2562 stat_buffer[30] = buffer[57];
2563 stat_buffer[31] = buffer[56];
2564 /* st_spare2 4 */
2565 stat_buffer[32] = 0;
2566 stat_buffer[33] = 0;
2567 stat_buffer[34] = 0;
2568 stat_buffer[35] = 0;
2569 /* st_ctime 4 */
2570 stat_buffer[36] = buffer[63];
2571 stat_buffer[37] = buffer[62];
2572 stat_buffer[38] = buffer[61];
2573 stat_buffer[39] = buffer[60];
2574 /* st_spare3 4 */
2575 stat_buffer[40] = 0;
2576 stat_buffer[41] = 0;
2577 stat_buffer[42] = 0;
2578 stat_buffer[43] = 0;
2579 /* st_blksize 4 */
2580 stat_buffer[44] = buffer[43];
2581 stat_buffer[45] = buffer[42];
2582 stat_buffer[46] = buffer[41];
2583 stat_buffer[47] = buffer[40];
2584 /* st_blocks 4 */
2585 stat_buffer[48] = buffer[51];
2586 stat_buffer[49] = buffer[50];
2587 stat_buffer[50] = buffer[49];
2588 stat_buffer[51] = buffer[48];
2589 /* st_spare4 8 */
2590 stat_buffer[52] = 0;
2591 stat_buffer[53] = 0;
2592 stat_buffer[54] = 0;
2593 stat_buffer[55] = 0;
2594 stat_buffer[56] = 0;
2595 stat_buffer[57] = 0;
2596 stat_buffer[58] = 0;
2597 stat_buffer[59] = 0;
2598
2599 return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_STAT_SIZE, stat_buffer);
2600 } else if (NDS32_SYSCALL_GETTIMEOFDAY == nds32->active_syscall_id) {
2601 /* If doing GDB file-I/O, target should convert 'struct timeval'
2602 * from gdb-format to target-format */
2603 uint8_t timeval_buffer[NDS32_STRUCT_TIMEVAL_SIZE];
2604 timeval_buffer[0] = buffer[3];
2605 timeval_buffer[1] = buffer[2];
2606 timeval_buffer[2] = buffer[1];
2607 timeval_buffer[3] = buffer[0];
2608 timeval_buffer[4] = buffer[11];
2609 timeval_buffer[5] = buffer[10];
2610 timeval_buffer[6] = buffer[9];
2611 timeval_buffer[7] = buffer[8];
2612
2613 return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_TIMEVAL_SIZE, timeval_buffer);
2614 }
2615
2616 return nds32_write_buffer(nds32->target, address, size, buffer);
2617 }
2618
/**
 * Perform the initial reset-and-halt: assert srst with the adapter's
 * RESET_HOLD mode so the core comes out of reset already halted.
 */
int nds32_reset_halt(struct nds32 *nds32)
{
	LOG_INFO("reset halt as init");

	struct aice_port_s *aice = target_to_aice(nds32->target);
	aice_assert_srst(aice, AICE_RESET_HOLD);

	return ERROR_OK;
}

Linking to existing account procedure

If you already have an account and want to add another login method, you must first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)