target/arc: Introduce L1I,L1D,L2 caches support
[openocd.git] / src / target / arc.c
1 /***************************************************************************
2 * Copyright (C) 2013-2015,2019-2020 Synopsys, Inc. *
3 * Frank Dols <frank.dols@synopsys.com> *
4 * Mischa Jonker <mischa.jonker@synopsys.com> *
5 * Anton Kolesov <anton.kolesov@synopsys.com> *
6 * Evgeniy Didin <didin@synopsys.com> *
7 * *
8 * SPDX-License-Identifier: GPL-2.0-or-later *
9 ***************************************************************************/
10
11
12 #ifdef HAVE_CONFIG_H
13 #include "config.h"
14 #endif
15
16 #include "arc.h"
17
18
19
20 /*
21 * ARC architecture specific details.
22 *
23 * ARC has two types of registers:
24 * 1) Core registers (e.g. r0, r1...) [is_core = true]
25 * 2) Auxiliary registers [is_core = false]
26 *
27 * Auxiliary registers in turn can be divided into
28 * read-only BCR (build configuration regs, e.g. isa_config, mpu_build) and
29 * R/RW non-BCR ("control" registers, e.g. pc, status32_t, debug).
30 *
31 * The way core and AUX registers are accessed differs at the JTAG level.
32 * The BCR/non-BCR split describes whether the register is immutable and
33 * whether reading a non-existent register is a safe RAZ rather than an error.
34 * Note, core registers cannot be BCR.
35 *
36 * In arc/cpu/ tcl files all registers are defined as core, non-BCR aux
37 * and BCR aux; via the "add-reg" command they are added to three lists
38 * respectively: core_reg_descriptions, aux_reg_descriptions,
39 * bcr_reg_descriptions.
40 *
41 * Due to the specifics of accessing BCR/non-BCR registers there are two
42 * register caches:
43 * 1) core_and_aux_cache - includes registers described in the
44 * core_reg_descriptions and aux_reg_descriptions lists.
45 * Used during the save/restore context step.
46 * 2) bcr_cache - includes registers described in bcr_reg_descriptions.
47 * Currently used internally during configure step.
48 */
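
/*
 * For illustration only (the names below are typical examples taken from the
 * register descriptions mentioned above, not new definitions):
 *
 *   "r0"          is_core=true                 -> core_reg_descriptions
 *   "pc"          is_core=false, is_bcr=false  -> aux_reg_descriptions
 *   "isa_config"  is_core=false, is_bcr=true   -> bcr_reg_descriptions
 *
 * The first two end up in core_and_aux_cache, the last one in bcr_cache.
 */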
49
50
51
52 void arc_reg_data_type_add(struct target *target,
53 struct arc_reg_data_type *data_type)
54 {
55 LOG_DEBUG("Adding %s reg_data_type", data_type->data_type.id);
56 struct arc_common *arc = target_to_arc(target);
57 assert(arc);
58
59 list_add_tail(&data_type->list, &arc->reg_data_types);
60 }
61
62 /**
63 * Private implementation of register_get_by_name() for ARC that
64 * doesn't skip registers that do not [yet] exist. Used in many places
65 * to iterate through registers and even to mark required registers as
66 * existing.
67 */
68 struct reg *arc_reg_get_by_name(struct reg_cache *first,
69 const char *name, bool search_all)
70 {
71 unsigned int i;
72 struct reg_cache *cache = first;
73
74 while (cache) {
75 for (i = 0; i < cache->num_regs; i++) {
76 if (!strcmp(cache->reg_list[i].name, name))
77 return &(cache->reg_list[i]);
78 }
79
80 if (search_all)
81 cache = cache->next;
82 else
83 break;
84 }
85
86 return NULL;
87 }
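
/* Usage sketch (illustrative only): with search_all=true the lookup walks
 * every cache chained to target->reg_cache, so a BCR living in the second
 * (bcr) cache can be found as well:
 *
 *   struct reg *r = arc_reg_get_by_name(target->reg_cache, "isa_config", true);
 *   if (r && r->exist)
 *       ...;
 *
 * With search_all=false only the first cache (core and aux) is searched. */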
88
89 /**
90 * Reset internal states of caches. Must be called when entering debugging.
91 *
92 * @param target Target for which to reset caches states.
93 */
94 int arc_reset_caches_states(struct target *target)
95 {
96 struct arc_common *arc = target_to_arc(target);
97
98 LOG_DEBUG("Resetting internal variables of caches states");
99
100 /* Reset caches states. */
101 arc->dcache_flushed = false;
102 arc->l2cache_flushed = false;
103 arc->icache_invalidated = false;
104 arc->dcache_invalidated = false;
105 arc->l2cache_invalidated = false;
106
107 return ERROR_OK;
108 }
109
110 /* Initialize the arc_common structure, which is passed to the OpenOCD target instance */
111 static int arc_init_arch_info(struct target *target, struct arc_common *arc,
112 struct jtag_tap *tap)
113 {
114 arc->common_magic = ARC_COMMON_MAGIC;
115 target->arch_info = arc;
116
117 arc->jtag_info.tap = tap;
118
119 /* The only allowed ir_length is 4 for ARC jtag. */
120 if (tap->ir_length != 4) {
121 LOG_ERROR("ARC jtag instruction length should be equal to 4");
122 return ERROR_FAIL;
123 }
124
125 /* On most ARC targets there is a dcache, so we enable its flushing
126 * by default. If there is no dcache, there will be no error, just a slight
127 * performance penalty from unnecessary JTAG operations. */
128 arc->has_dcache = true;
129 arc->has_icache = true;
130 /* L2$ is not available in a target by default. */
131 arc->has_l2cache = false;
132 arc_reset_caches_states(target);
133
134 /* Add standard GDB data types */
135 INIT_LIST_HEAD(&arc->reg_data_types);
136 struct arc_reg_data_type *std_types = calloc(ARRAY_SIZE(standard_gdb_types),
137 sizeof(*std_types));
138
139 if (!std_types) {
140 LOG_ERROR("Unable to allocate memory");
141 return ERROR_FAIL;
142 }
143
144 for (unsigned int i = 0; i < ARRAY_SIZE(standard_gdb_types); i++) {
145 std_types[i].data_type.type = standard_gdb_types[i].type;
146 std_types[i].data_type.id = standard_gdb_types[i].id;
147 arc_reg_data_type_add(target, &(std_types[i]));
148 }
149
150 /* Fields related to target descriptions */
151 INIT_LIST_HEAD(&arc->core_reg_descriptions);
152 INIT_LIST_HEAD(&arc->aux_reg_descriptions);
153 INIT_LIST_HEAD(&arc->bcr_reg_descriptions);
154 arc->num_regs = 0;
155 arc->num_core_regs = 0;
156 arc->num_aux_regs = 0;
157 arc->num_bcr_regs = 0;
158 arc->last_general_reg = ULONG_MAX;
159 arc->pc_index_in_cache = ULONG_MAX;
160 arc->debug_index_in_cache = ULONG_MAX;
161
162 return ERROR_OK;
163 }
164
165 int arc_reg_add(struct target *target, struct arc_reg_desc *arc_reg,
166 const char * const type_name, const size_t type_name_len)
167 {
168 assert(target);
169 assert(arc_reg);
170
171 struct arc_common *arc = target_to_arc(target);
172 assert(arc);
173
174 /* Find register type */
175 {
176 struct arc_reg_data_type *type;
177 list_for_each_entry(type, &arc->reg_data_types, list)
178 if (!strncmp(type->data_type.id, type_name, type_name_len)) {
179 arc_reg->data_type = &(type->data_type);
180 break;
181 }
182
183 if (!arc_reg->data_type)
184 return ERROR_ARC_REGTYPE_NOT_FOUND;
185 }
186
187 if (arc_reg->is_core) {
188 list_add_tail(&arc_reg->list, &arc->core_reg_descriptions);
189 arc->num_core_regs += 1;
190 } else if (arc_reg->is_bcr) {
191 list_add_tail(&arc_reg->list, &arc->bcr_reg_descriptions);
192 arc->num_bcr_regs += 1;
193 } else {
194 list_add_tail(&arc_reg->list, &arc->aux_reg_descriptions);
195 arc->num_aux_regs += 1;
196 }
197 arc->num_regs += 1;
198
199 LOG_DEBUG(
200 "added register {name=%s, num=0x%x, type=%s%s%s%s}",
201 arc_reg->name, arc_reg->arch_num, arc_reg->data_type->id,
202 arc_reg->is_core ? ", core" : "", arc_reg->is_bcr ? ", bcr" : "",
203 arc_reg->is_general ? ", general" : ""
204 );
205
206 return ERROR_OK;
207 }
208
209 /* Reading core or aux register */
210 static int arc_get_register(struct reg *reg)
211 {
212 assert(reg);
213
214 struct arc_reg_desc *desc = reg->arch_info;
215 struct target *target = desc->target;
216 struct arc_common *arc = target_to_arc(target);
217
218 uint32_t value;
219
220 if (reg->valid) {
221 LOG_DEBUG("Get register (cached) gdb_num=%" PRIu32 ", name=%s, value=0x%" PRIx32,
222 reg->number, desc->name, target_buffer_get_u32(target, reg->value));
223 return ERROR_OK;
224 }
225
226 if (desc->is_core) {
227 /* Accessing the R61/R62 registers causes a JTAG hang */
228 if (desc->arch_num == CORE_R61_NUM || desc->arch_num == CORE_R62_NUM) {
229 LOG_ERROR("It is forbidden to read core registers 61 and 62.");
230 return ERROR_FAIL;
231 }
232 CHECK_RETVAL(arc_jtag_read_core_reg_one(&arc->jtag_info, desc->arch_num,
233 &value));
234 } else {
235 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, desc->arch_num,
236 &value));
237 }
238
239 target_buffer_set_u32(target, reg->value, value);
240
241 /* If the target is not halted, all register reads should be uncached. */
242 if (target->state == TARGET_HALTED)
243 reg->valid = true;
244 else
245 reg->valid = false;
246
247 reg->dirty = false;
248
249 LOG_DEBUG("Get register gdb_num=%" PRIu32 ", name=%s, value=0x%" PRIx32,
250 reg->number , desc->name, value);
251
252
253 return ERROR_OK;
254 }
255
256 /* Writing core or aux register */
257 static int arc_set_register(struct reg *reg, uint8_t *buf)
258 {
259 struct arc_reg_desc *desc = reg->arch_info;
260 struct target *target = desc->target;
261 uint32_t value = target_buffer_get_u32(target, buf);
262 /* Unlike the "get" function, "set" is supported only if the target
263 * is halted. Async writes are not supported yet. */
264 if (target->state != TARGET_HALTED)
265 return ERROR_TARGET_NOT_HALTED;
266
267 /* Accessing the R61/R62 registers causes a JTAG hang */
268 if (desc->is_core && (desc->arch_num == CORE_R61_NUM ||
269 desc->arch_num == CORE_R62_NUM)) {
270 LOG_ERROR("It is forbidden to write core registers 61 and 62.");
271 return ERROR_FAIL;
272 }
273 target_buffer_set_u32(target, reg->value, value);
274
275 LOG_DEBUG("Set register gdb_num=%" PRIu32 ", name=%s, value=0x%08" PRIx32,
276 reg->number, desc->name, value);
277
278 reg->valid = true;
279 reg->dirty = true;
280
281 return ERROR_OK;
282 }
283
284 const struct reg_arch_type arc_reg_type = {
285 .get = arc_get_register,
286 .set = arc_set_register,
287 };
288
289 /* GDB register groups. For now we support only general and "empty" */
290 static const char * const reg_group_general = "general";
291 static const char * const reg_group_other = "";
292
293 /* Common code to initialize `struct reg` for different registers: core, aux, bcr. */
294 static int arc_init_reg(struct target *target, struct reg *reg,
295 struct arc_reg_desc *reg_desc, unsigned long number)
296 {
297 assert(target);
298 assert(reg);
299 assert(reg_desc);
300
301 struct arc_common *arc = target_to_arc(target);
302
303 /* Initialize struct reg */
304 reg->name = reg_desc->name;
305 reg->size = 32; /* All registers in ARC are 32-bit */
306 reg->value = &reg_desc->reg_value;
307 reg->type = &arc_reg_type;
308 reg->arch_info = reg_desc;
309 reg->caller_save = true; /* @todo should be configurable. */
310 reg->reg_data_type = reg_desc->data_type;
311 reg->feature = &reg_desc->feature;
312
313 reg->feature->name = reg_desc->gdb_xml_feature;
314
315 /* reg->number is used by OpenOCD as the value for @regnum. Thus when setting
316 * the value of a register, GDB will use it as the register number in the
317 * P-packet. The OpenOCD gdbserver will then use the register number from the
318 * P-packet as an array index into the reg_list returned by
319 * arc_regs_get_gdb_reg_list. So to ensure that registers are matched
320 * correctly, it is required either to sort the registers in
321 * arc_regs_get_gdb_reg_list or to assign numbers sequentially here,
322 * according to how the registers will be sorted in
323 * arc_regs_get_gdb_reg_list. The second option is much simpler. */
324 reg->number = number;
325
326 if (reg_desc->is_general) {
327 arc->last_general_reg = reg->number;
328 reg->group = reg_group_general;
329 } else {
330 reg->group = reg_group_other;
331 }
332
333 return ERROR_OK;
334 }
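
/* Numbering example (hypothetical counts, for illustration only): with 64 core
 * registers and 38 aux registers, reg->number runs 0..63 for the core cache
 * entries and 64..101 for the aux entries, matching their positions in the
 * reg_list returned by arc_regs_get_gdb_reg_list. A GDB packet "P42=..."
 * therefore addresses reg_list[0x42] directly, without any extra sorting. */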
335
336 /* Building aux/core reg_cache */
337 static int arc_build_reg_cache(struct target *target)
338 {
339 unsigned long i = 0;
340 struct arc_reg_desc *reg_desc;
341 /* get pointers to arch-specific information */
342 struct arc_common *arc = target_to_arc(target);
343 const unsigned long num_regs = arc->num_core_regs + arc->num_aux_regs;
344 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
345 struct reg_cache *cache = calloc(1, sizeof(*cache));
346 struct reg *reg_list = calloc(num_regs, sizeof(*reg_list));
347
348 if (!cache || !reg_list) {
349 LOG_ERROR("Not enough memory");
350 goto fail;
351 }
352
353 /* Build the process context cache */
354 cache->name = "arc registers";
355 cache->next = NULL;
356 cache->reg_list = reg_list;
357 cache->num_regs = num_regs;
358 arc->core_and_aux_cache = cache;
359 (*cache_p) = cache;
360
361 if (list_empty(&arc->core_reg_descriptions)) {
362 LOG_ERROR("No core registers were defined");
363 goto fail;
364 }
365
366 list_for_each_entry(reg_desc, &arc->core_reg_descriptions, list) {
367 CHECK_RETVAL(arc_init_reg(target, &reg_list[i], reg_desc, i));
368
369 LOG_DEBUG("reg n=%3li name=%3s group=%s feature=%s", i,
370 reg_list[i].name, reg_list[i].group,
371 reg_list[i].feature->name);
372
373 i += 1;
374 }
375
376 if (list_empty(&arc->aux_reg_descriptions)) {
377 LOG_ERROR("No aux registers were defined");
378 goto fail;
379 }
380
381 list_for_each_entry(reg_desc, &arc->aux_reg_descriptions, list) {
382 CHECK_RETVAL(arc_init_reg(target, &reg_list[i], reg_desc, i));
383
384 LOG_DEBUG("reg n=%3li name=%3s group=%s feature=%s", i,
385 reg_list[i].name, reg_list[i].group,
386 reg_list[i].feature->name);
387
388 /* PC and DEBUG are essential so we search for them. */
389 if (!strcmp("pc", reg_desc->name)) {
390 if (arc->pc_index_in_cache != ULONG_MAX) {
391 LOG_ERROR("Double definition of PC in configuration");
392 goto fail;
393 }
394 arc->pc_index_in_cache = i;
395 } else if (!strcmp("debug", reg_desc->name)) {
396 if (arc->debug_index_in_cache != ULONG_MAX) {
397 LOG_ERROR("Double definition of DEBUG in configuration");
398 goto fail;
399 }
400 arc->debug_index_in_cache = i;
401 }
402 i += 1;
403 }
404
405 if (arc->pc_index_in_cache == ULONG_MAX
406 || arc->debug_index_in_cache == ULONG_MAX) {
407 LOG_ERROR("`pc' and `debug' registers must be present in target description.");
408 goto fail;
409 }
410
411 assert(i == (arc->num_core_regs + arc->num_aux_regs));
412
413 arc->core_aux_cache_built = true;
414
415 return ERROR_OK;
416
417 fail:
418 free(cache);
419 free(reg_list);
420
421 return ERROR_FAIL;
422 }
423
424 /* Build bcr reg_cache.
425 * This function must be called only after arc_build_reg_cache */
426 static int arc_build_bcr_reg_cache(struct target *target)
427 {
428 /* get pointers to arch-specific information */
429 struct arc_common *arc = target_to_arc(target);
430 const unsigned long num_regs = arc->num_bcr_regs;
431 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
432 struct reg_cache *cache = malloc(sizeof(*cache));
433 struct reg *reg_list = calloc(num_regs, sizeof(*reg_list));
434
435 struct arc_reg_desc *reg_desc;
436 unsigned long i = 0;
437 unsigned long gdb_regnum = arc->core_and_aux_cache->num_regs;
438
439 if (!cache || !reg_list) {
440 LOG_ERROR("Unable to allocate memory");
441 goto fail;
442 }
443
444 /* Build the process context cache */
445 cache->name = "arc.bcr";
446 cache->next = NULL;
447 cache->reg_list = reg_list;
448 cache->num_regs = num_regs;
449 arc->bcr_cache = cache;
450 (*cache_p) = cache;
451
452 if (list_empty(&arc->bcr_reg_descriptions)) {
453 LOG_ERROR("No BCR registers are defined");
454 goto fail;
455 }
456
457 list_for_each_entry(reg_desc, &arc->bcr_reg_descriptions, list) {
458 CHECK_RETVAL(arc_init_reg(target, &reg_list[i], reg_desc, gdb_regnum));
459 /* BCRs always exist semantically; they just read as zero if there is
460 * no real register behind them. */
461 reg_list[i].exist = true;
462
463 LOG_DEBUG("reg n=%3li name=%3s group=%s feature=%s", i,
464 reg_list[i].name, reg_list[i].group,
465 reg_list[i].feature->name);
466 i += 1;
467 gdb_regnum += 1;
468 }
469
470 assert(i == arc->num_bcr_regs);
471
472 arc->bcr_cache_built = true;
473
474
475 return ERROR_OK;
476 fail:
477 free(cache);
478 free(reg_list);
479
480 return ERROR_FAIL;
481 }
482
483
484 static int arc_get_gdb_reg_list(struct target *target, struct reg **reg_list[],
485 int *reg_list_size, enum target_register_class reg_class)
486 {
487 assert(target->reg_cache);
488 struct arc_common *arc = target_to_arc(target);
489
490 /* get pointers to arch-specific information storage */
491 *reg_list_size = arc->num_regs;
492 *reg_list = calloc(*reg_list_size, sizeof(struct reg *));
493
494 if (!*reg_list) {
495 LOG_ERROR("Unable to allocate memory");
496 return ERROR_FAIL;
497 }
498
499 /* The OpenOCD gdb_server API seems to be inconsistent here: when it generates
500 * the XML tdesc it filters out !exist registers, however when creating a
501 * g-packet it doesn't do so. REG_CLASS_ALL is used in the first case, and
502 * REG_CLASS_GENERAL in the latter one. Due to this we have to filter
503 * out !exist registers for "general", but not for "all". Attempts to filter out
504 * !exist for "all" as well would cause a failed check in the OpenOCD GDB
505 * server. */
506 if (reg_class == REG_CLASS_ALL) {
507 unsigned long i = 0;
508 struct reg_cache *reg_cache = target->reg_cache;
509 while (reg_cache) {
510 for (unsigned j = 0; j < reg_cache->num_regs; j++, i++)
511 (*reg_list)[i] = &reg_cache->reg_list[j];
512 reg_cache = reg_cache->next;
513 }
514 assert(i == arc->num_regs);
515 LOG_DEBUG("REG_CLASS_ALL: number of regs=%i", *reg_list_size);
516 } else {
517 unsigned long i = 0;
518 unsigned long gdb_reg_number = 0;
519 struct reg_cache *reg_cache = target->reg_cache;
520 while (reg_cache) {
521 for (unsigned j = 0;
522 j < reg_cache->num_regs && gdb_reg_number <= arc->last_general_reg;
523 j++) {
524 if (reg_cache->reg_list[j].exist) {
525 (*reg_list)[i] = &reg_cache->reg_list[j];
526 i++;
527 }
528 gdb_reg_number += 1;
529 }
530 reg_cache = reg_cache->next;
531 }
532 *reg_list_size = i;
533 LOG_DEBUG("REG_CLASS_GENERAL: number of regs=%i", *reg_list_size);
534 }
535
536 return ERROR_OK;
537 }
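
/* Worked example (hypothetical counts, for illustration only): with a 102-entry
 * core+aux cache, a 5-entry BCR cache and last_general_reg = 63,
 * REG_CLASS_ALL returns all 107 entries, including !exist ones, while
 * REG_CLASS_GENERAL returns only the existing registers whose gdb number is
 * <= 63, so *reg_list_size may end up smaller than 64. */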
538
539 /* Reading field of struct_type register */
540 int arc_reg_get_field(struct target *target, const char *reg_name,
541 const char *field_name, uint32_t *value_ptr)
542 {
543 struct reg_data_type_struct_field *field;
544
545 LOG_DEBUG("getting register field (reg_name=%s, field_name=%s)", reg_name, field_name);
546
547 /* Get register */
548 struct reg *reg = arc_reg_get_by_name(target->reg_cache, reg_name, true);
549
550 if (!reg) {
551 LOG_ERROR("Requested register `%s' doesn't exist.", reg_name);
552 return ERROR_ARC_REGISTER_NOT_FOUND;
553 }
554
555 if (reg->reg_data_type->type != REG_TYPE_ARCH_DEFINED
556 || reg->reg_data_type->type_class != REG_TYPE_CLASS_STRUCT)
557 return ERROR_ARC_REGISTER_IS_NOT_STRUCT;
558
559 /* Get field in a register */
560 struct reg_data_type_struct *reg_struct =
561 reg->reg_data_type->reg_type_struct;
562 for (field = reg_struct->fields;
563 field;
564 field = field->next) {
565 if (!strcmp(field->name, field_name))
566 break;
567 }
568
569 if (!field)
570 return ERROR_ARC_REGISTER_FIELD_NOT_FOUND;
571
572 if (!field->use_bitfields)
573 return ERROR_ARC_FIELD_IS_NOT_BITFIELD;
574
575 if (!reg->valid)
576 CHECK_RETVAL(reg->type->get(reg));
577
578 /* First do an endianness-safe read of the register value,
579 * then convert it to a binary buffer for further
580 * field extraction */
581
582 *value_ptr = buf_get_u32(reg->value, field->bitfield->start,
583 field->bitfield->end - field->bitfield->start + 1);
584
585 return ERROR_OK;
586 }
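
/* Usage sketch: this is how the configure code below reads individual
 * bitfields of a struct-typed register, e.g.
 *
 *   uint32_t version;
 *   CHECK_RETVAL(arc_reg_get_field(target, "dccm_build", "version", &version));
 *
 * The register must be described with a struct data type whose fields use
 * bitfields, otherwise an ERROR_ARC_* code is returned. */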
587
588 static int arc_get_register_value(struct target *target, const char *reg_name,
589 uint32_t *value_ptr)
590 {
591 LOG_DEBUG("reg_name=%s", reg_name);
592
593 struct reg *reg = arc_reg_get_by_name(target->reg_cache, reg_name, true);
594
595 if (!reg)
596 return ERROR_ARC_REGISTER_NOT_FOUND;
597
598 if (!reg->valid)
599 CHECK_RETVAL(reg->type->get(reg));
600
601 *value_ptr = target_buffer_get_u32(target, reg->value);
602
603 return ERROR_OK;
604 }
605
606
607 /* Configure DCCM's */
608 static int arc_configure_dccm(struct target *target)
609 {
610 struct arc_common *arc = target_to_arc(target);
611
612 uint32_t dccm_build_version, dccm_build_size0, dccm_build_size1;
613 CHECK_RETVAL(arc_reg_get_field(target, "dccm_build", "version",
614 &dccm_build_version));
615 CHECK_RETVAL(arc_reg_get_field(target, "dccm_build", "size0",
616 &dccm_build_size0));
617 CHECK_RETVAL(arc_reg_get_field(target, "dccm_build", "size1",
618 &dccm_build_size1));
619 /* There is no support for a configurable number of cycles yet,
620 * so there is no difference between v3 and v4. */
621 if ((dccm_build_version == 3 || dccm_build_version == 4) && dccm_build_size0 > 0) {
622 CHECK_RETVAL(arc_get_register_value(target, "aux_dccm", &(arc->dccm_start)));
623 uint32_t dccm_size = 0x100;
624 dccm_size <<= dccm_build_size0;
625 if (dccm_build_size0 == 0xF)
626 dccm_size <<= dccm_build_size1;
627 arc->dccm_end = arc->dccm_start + dccm_size;
628 LOG_DEBUG("DCCM detected start=0x%" PRIx32 " end=0x%" PRIx32,
629 arc->dccm_start, arc->dccm_end);
630
631 }
632 return ERROR_OK;
633 }
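
/* Worked example of the size calculation above (illustrative values only):
 * with size0 = 7 the DCCM size is 0x100 << 7 = 32 KiB, so for aux_dccm =
 * 0x80000000 the region is [0x80000000, 0x80008000). Only when size0 == 0xF
 * is the size additionally shifted left by size1. */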
634
635
636 /* Configure ICCM's */
637
638 static int arc_configure_iccm(struct target *target)
639 {
640 struct arc_common *arc = target_to_arc(target);
641
642 /* ICCM0 */
643 uint32_t iccm_build_version, iccm_build_size00, iccm_build_size01;
644 uint32_t aux_iccm = 0;
645 CHECK_RETVAL(arc_reg_get_field(target, "iccm_build", "version",
646 &iccm_build_version));
647 CHECK_RETVAL(arc_reg_get_field(target, "iccm_build", "iccm0_size0",
648 &iccm_build_size00));
649 CHECK_RETVAL(arc_reg_get_field(target, "iccm_build", "iccm0_size1",
650 &iccm_build_size01));
651 if (iccm_build_version == 4 && iccm_build_size00 > 0) {
652 CHECK_RETVAL(arc_get_register_value(target, "aux_iccm", &aux_iccm));
653 uint32_t iccm0_size = 0x100;
654 iccm0_size <<= iccm_build_size00;
655 if (iccm_build_size00 == 0xF)
656 iccm0_size <<= iccm_build_size01;
657 /* iccm0 start is located in highest 4 bits of aux_iccm */
658 arc->iccm0_start = aux_iccm & 0xF0000000;
659 arc->iccm0_end = arc->iccm0_start + iccm0_size;
660 LOG_DEBUG("ICCM0 detected start=0x%" PRIx32 " end=0x%" PRIx32,
661 arc->iccm0_start, arc->iccm0_end);
662 }
663
664 /* ICCM1 */
665 uint32_t iccm_build_size10, iccm_build_size11;
666 CHECK_RETVAL(arc_reg_get_field(target, "iccm_build", "iccm1_size0",
667 &iccm_build_size10));
668 CHECK_RETVAL(arc_reg_get_field(target, "iccm_build", "iccm1_size1",
669 &iccm_build_size11));
670 if (iccm_build_version == 4 && iccm_build_size10 > 0) {
671 /* Use value read for ICCM0 */
672 if (!aux_iccm)
673 CHECK_RETVAL(arc_get_register_value(target, "aux_iccm", &aux_iccm));
674 uint32_t iccm1_size = 0x100;
675 iccm1_size <<= iccm_build_size10;
676 if (iccm_build_size10 == 0xF)
677 iccm1_size <<= iccm_build_size11;
678 arc->iccm1_start = aux_iccm & 0x0F000000;
679 arc->iccm1_end = arc->iccm1_start + iccm1_size;
680 LOG_DEBUG("ICCM1 detected start=0x%" PRIx32 " end=0x%" PRIx32,
681 arc->iccm1_start, arc->iccm1_end);
682 }
683 return ERROR_OK;
684 }
685
686 /* Configure some core features, depending on BCRs. */
687 static int arc_configure(struct target *target)
688 {
689 LOG_DEBUG("Configuring ARC ICCM and DCCM");
690
691 /* Configuring DCCM if DCCM_BUILD and AUX_DCCM are known registers. */
692 if (arc_reg_get_by_name(target->reg_cache, "dccm_build", true) &&
693 arc_reg_get_by_name(target->reg_cache, "aux_dccm", true))
694 CHECK_RETVAL(arc_configure_dccm(target));
695
696 /* Configuring ICCM if ICCM_BUILD and AUX_ICCM are known registers. */
697 if (arc_reg_get_by_name(target->reg_cache, "iccm_build", true) &&
698 arc_reg_get_by_name(target->reg_cache, "aux_iccm", true))
699 CHECK_RETVAL(arc_configure_iccm(target));
700
701 return ERROR_OK;
702 }
703
704 /* arc_examine is the function used for all ARC targets. */
705 static int arc_examine(struct target *target)
706 {
707 uint32_t status;
708 struct arc_common *arc = target_to_arc(target);
709
710 CHECK_RETVAL(arc_jtag_startup(&arc->jtag_info));
711
712 if (!target_was_examined(target)) {
713 CHECK_RETVAL(arc_jtag_status(&arc->jtag_info, &status));
714 if (status & ARC_JTAG_STAT_RU)
715 target->state = TARGET_RUNNING;
716 else
717 target->state = TARGET_HALTED;
718
719 /* Read BCRs and configure optional registers. */
720 CHECK_RETVAL(arc_configure(target));
721
722 target_set_examined(target);
723 }
724
725 return ERROR_OK;
726 }
727
728 static int arc_halt(struct target *target)
729 {
730 uint32_t value, irq_state;
731 struct arc_common *arc = target_to_arc(target);
732
733 LOG_DEBUG("target->state: %s", target_state_name(target));
734
735 if (target->state == TARGET_HALTED) {
736 LOG_DEBUG("target was already halted");
737 return ERROR_OK;
738 }
739
740 if (target->state == TARGET_UNKNOWN)
741 LOG_WARNING("target was in unknown state when halt was requested");
742
743 if (target->state == TARGET_RESET) {
744 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
745 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
746 return ERROR_TARGET_FAILURE;
747 } else {
748 target->debug_reason = DBG_REASON_DBGRQ;
749 }
750 }
751
752 /* Break (stop) processor.
753 * Do a read-modify-write sequence, or DEBUG.UB will be reset unintentionally.
754 * We do not use the arc_get/set_core_reg functions here because they imply
755 * that the processor is already halted. */
756 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_DEBUG_REG, &value));
757 value |= SET_CORE_FORCE_HALT; /* set the HALT bit */
758 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DEBUG_REG, value));
759 alive_sleep(1);
760
761 /* Save current IRQ state */
762 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, &irq_state));
763
764 if (irq_state & AUX_STATUS32_REG_IE_BIT)
765 arc->irq_state = 1;
766 else
767 arc->irq_state = 0;
768
769 /* update state and notify gdb*/
770 target->state = TARGET_HALTED;
771 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
772
773 /* some more debug information */
774 if (debug_level >= LOG_LVL_DEBUG) {
775 LOG_DEBUG("core stopped (halted) DEBUG-REG: 0x%08" PRIx32, value);
776 CHECK_RETVAL(arc_get_register_value(target, "status32", &value));
777 LOG_DEBUG("core STATUS32: 0x%08" PRIx32, value);
778 }
779
780 return ERROR_OK;
781 }
782
783 /**
784 * Read registers that are used in GDB g-packet. We don't read them one-by-one,
785 * but do that in one batch operation to improve speed. Calls to the JTAG layer
786 * are expensive, so it is better to make one big call that reads all necessary
787 * registers, instead of many calls, one per register.
788 */
789 static int arc_save_context(struct target *target)
790 {
791 int retval = ERROR_OK;
792 unsigned int i;
793 struct arc_common *arc = target_to_arc(target);
794 struct reg *reg_list = arc->core_and_aux_cache->reg_list;
795
796 LOG_DEBUG("Saving aux and core registers values");
797 assert(reg_list);
798
799 /* It is assumed that there is at least one AUX register in the list, for
800 * example PC. */
801 const uint32_t core_regs_size = arc->num_core_regs * sizeof(uint32_t);
802 /* last_general_reg is an inclusive number. To get the count of registers
803 * it is required to add 1. */
804 const uint32_t regs_to_scan =
805 MIN(arc->last_general_reg + 1, arc->num_regs);
806 const uint32_t aux_regs_size = arc->num_aux_regs * sizeof(uint32_t);
807 uint32_t *core_values = malloc(core_regs_size);
808 uint32_t *aux_values = malloc(aux_regs_size);
809 uint32_t *core_addrs = malloc(core_regs_size);
810 uint32_t *aux_addrs = malloc(aux_regs_size);
811 unsigned int core_cnt = 0;
812 unsigned int aux_cnt = 0;
813
814 if (!core_values || !core_addrs || !aux_values || !aux_addrs) {
815 LOG_ERROR("Unable to allocate memory");
816 retval = ERROR_FAIL;
817 goto exit;
818 }
819
820 memset(core_values, 0xff, core_regs_size);
821 memset(core_addrs, 0xff, core_regs_size);
822 memset(aux_values, 0xff, aux_regs_size);
823 memset(aux_addrs, 0xff, aux_regs_size);
824
825 for (i = 0; i < MIN(arc->num_core_regs, regs_to_scan); i++) {
826 struct reg *reg = &(reg_list[i]);
827 struct arc_reg_desc *arc_reg = reg->arch_info;
828 if (!reg->valid && reg->exist) {
829 core_addrs[core_cnt] = arc_reg->arch_num;
830 core_cnt += 1;
831 }
832 }
833
834 for (i = arc->num_core_regs; i < regs_to_scan; i++) {
835 struct reg *reg = &(reg_list[i]);
836 struct arc_reg_desc *arc_reg = reg->arch_info;
837 if (!reg->valid && reg->exist) {
838 aux_addrs[aux_cnt] = arc_reg->arch_num;
839 aux_cnt += 1;
840 }
841 }
842
843 /* Read data from target. */
844 if (core_cnt > 0) {
845 retval = arc_jtag_read_core_reg(&arc->jtag_info, core_addrs, core_cnt, core_values);
846 if (ERROR_OK != retval) {
847 LOG_ERROR("Attempt to read core registers failed.");
848 retval = ERROR_FAIL;
849 goto exit;
850 }
851 }
852 if (aux_cnt > 0) {
853 retval = arc_jtag_read_aux_reg(&arc->jtag_info, aux_addrs, aux_cnt, aux_values);
854 if (ERROR_OK != retval) {
855 LOG_ERROR("Attempt to read aux registers failed.");
856 retval = ERROR_FAIL;
857 goto exit;
858 }
859 }
860
861 /* Parse core regs */
862 core_cnt = 0;
863 for (i = 0; i < MIN(arc->num_core_regs, regs_to_scan); i++) {
864 struct reg *reg = &(reg_list[i]);
865 struct arc_reg_desc *arc_reg = reg->arch_info;
866 if (!reg->valid && reg->exist) {
867 target_buffer_set_u32(target, reg->value, core_values[core_cnt]);
868 reg->valid = true;
869 reg->dirty = false;
870 LOG_DEBUG("Get core register regnum=%" PRIu32 ", name=%s, value=0x%08" PRIx32,
871 i, arc_reg->name, core_values[core_cnt]);
872 core_cnt += 1;
873 }
874 }
875
876 /* Parse aux regs */
877 aux_cnt = 0;
878 for (i = arc->num_core_regs; i < regs_to_scan; i++) {
879 struct reg *reg = &(reg_list[i]);
880 struct arc_reg_desc *arc_reg = reg->arch_info;
881 if (!reg->valid && reg->exist) {
882 target_buffer_set_u32(target, reg->value, aux_values[aux_cnt]);
883 reg->valid = true;
884 reg->dirty = false;
885 LOG_DEBUG("Get aux register regnum=%" PRIu32 ", name=%s, value=0x%08" PRIx32,
886 i, arc_reg->name, aux_values[aux_cnt]);
887 aux_cnt += 1;
888 }
889 }
890
891 exit:
892 free(core_values);
893 free(core_addrs);
894 free(aux_values);
895 free(aux_addrs);
896
897 return retval;
898 }
899
900 static int arc_examine_debug_reason(struct target *target)
901 {
902 uint32_t debug_bh;
903
904 /* Only check for the reason if we don't know it already. */
905 /* BTW after a single step the core is not marked as halted at this point, so
906 * reading from memory to get the current instruction wouldn't work anyway. */
907 if (target->debug_reason == DBG_REASON_DBGRQ ||
908 target->debug_reason == DBG_REASON_SINGLESTEP) {
909 return ERROR_OK;
910 }
911
912 CHECK_RETVAL(arc_reg_get_field(target, "debug", "bh",
913 &debug_bh));
914
915 if (debug_bh) {
916 /* DEBUG.BH is set if core halted due to BRK instruction. */
917 target->debug_reason = DBG_REASON_BREAKPOINT;
918 } else {
919 /* TODO: Add an actionpoint check when AP support is introduced. */
920 LOG_WARNING("Unknown debug reason");
921 }
922
923 return ERROR_OK;
924 }
925
926 static int arc_debug_entry(struct target *target)
927 {
928 CHECK_RETVAL(arc_save_context(target));
929
930 /* Reset internal indicators of cache states, otherwise D$/I$
931 * will not be flushed/invalidated when required. */
932 CHECK_RETVAL(arc_reset_caches_states(target));
933 CHECK_RETVAL(arc_examine_debug_reason(target));
934
935 return ERROR_OK;
936 }
937
938 static int arc_poll(struct target *target)
939 {
940 uint32_t status, value;
941 struct arc_common *arc = target_to_arc(target);
942
943 /* GDB polls continuously through this arc_poll() function. */
944 CHECK_RETVAL(arc_jtag_status(&arc->jtag_info, &status));
945
946 /* Check whether the processor is still running. */
947 if (status & ARC_JTAG_STAT_RU) {
948 if (target->state != TARGET_RUNNING) {
949 LOG_WARNING("target is still running!");
950 target->state = TARGET_RUNNING;
951 }
952 return ERROR_OK;
953 }
954 /* In some cases the JTAG status register indicates that the
955 * processor is in halt mode, while it is actually still running.
956 * We check the halt bit of the AUX STATUS32 register to set the correct state. */
957 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_RESET)) {
958 CHECK_RETVAL(arc_get_register_value(target, "status32", &value));
959 if (value & AUX_STATUS32_REG_HALT_BIT) {
960 LOG_DEBUG("ARC core in halt or reset state.");
961 /* Save context if target was not in reset state */
962 if (target->state == TARGET_RUNNING)
963 CHECK_RETVAL(arc_debug_entry(target));
964 target->state = TARGET_HALTED;
965 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
966 } else {
967 LOG_DEBUG("Discrepancy of STATUS32[0] HALT bit and ARC_JTAG_STAT_RU, "
968 "target is still running");
969 }
970
971 } else if (target->state == TARGET_DEBUG_RUNNING) {
972
973 target->state = TARGET_HALTED;
974 LOG_DEBUG("ARC core is in debug running mode");
975
976 CHECK_RETVAL(arc_debug_entry(target));
977
978 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED));
979 }
980
981 return ERROR_OK;
982 }
983
984 static int arc_assert_reset(struct target *target)
985 {
986 struct arc_common *arc = target_to_arc(target);
987 enum reset_types jtag_reset_config = jtag_get_reset_config();
988 bool srst_asserted = false;
989
990 LOG_DEBUG("target->state: %s", target_state_name(target));
991
992 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
993 /* allow scripts to override the reset event */
994
995 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
996 register_cache_invalidate(arc->core_and_aux_cache);
997 /* An ARC target might be in a halted state after reset, so
998 * if the script requested the processor to resume, it must
999 * be started manually to ensure that this request
1000 * is satisfied. */
1001 if (target->state == TARGET_HALTED && !target->reset_halt) {
1002 /* Resume the target and continue from the current
1003 * PC register value. */
1004 LOG_DEBUG("Starting CPU execution after reset");
1005 CHECK_RETVAL(target_resume(target, 1, 0, 0, 0));
1006 }
1007 target->state = TARGET_RESET;
1008
1009 return ERROR_OK;
1010 }
1011
1012 /* Some cores support connecting while srst is asserted;
1013 * use that mode if it has been configured. */
1014 if (!(jtag_reset_config & RESET_SRST_PULLS_TRST) &&
1015 (jtag_reset_config & RESET_SRST_NO_GATING)) {
1016 jtag_add_reset(0, 1);
1017 srst_asserted = true;
1018 }
1019
1020 if (jtag_reset_config & RESET_HAS_SRST) {
1021 /* should issue a srst only, but we may have to assert trst as well */
1022 if (jtag_reset_config & RESET_SRST_PULLS_TRST)
1023 jtag_add_reset(1, 1);
1024 else if (!srst_asserted)
1025 jtag_add_reset(0, 1);
1026 }
1027
1028 target->state = TARGET_RESET;
1029 jtag_add_sleep(50000);
1030
1031 register_cache_invalidate(arc->core_and_aux_cache);
1032
1033 if (target->reset_halt)
1034 CHECK_RETVAL(target_halt(target));
1035
1036 return ERROR_OK;
1037 }
1038
1039 static int arc_deassert_reset(struct target *target)
1040 {
1041 LOG_DEBUG("target->state: %s", target_state_name(target));
1042
1043 /* deassert reset lines */
1044 jtag_add_reset(0, 0);
1045
1046 return ERROR_OK;
1047 }
1048
1049 static int arc_arch_state(struct target *target)
1050 {
1051 uint32_t pc_value;
1052
1053 if (debug_level < LOG_LVL_DEBUG)
1054 return ERROR_OK;
1055
1056 CHECK_RETVAL(arc_get_register_value(target, "pc", &pc_value));
1057
1058 LOG_DEBUG("target state: %s; PC at: 0x%08" PRIx32,
1059 target_state_name(target),
1060 pc_value);
1061
1062 return ERROR_OK;
1063 }
1064
1065 /**
1066 * See arc_save_context() for reason why we want to dump all regs at once.
1067 * This however means that if there are dependencies between registers they
1068 * will not be observable until target will be resumed.
1069 */
1070 static int arc_restore_context(struct target *target)
1071 {
1072 int retval = ERROR_OK;
1073 unsigned int i;
1074 struct arc_common *arc = target_to_arc(target);
1075 struct reg *reg_list = arc->core_and_aux_cache->reg_list;
1076
1077 LOG_DEBUG("Restoring registers values");
1078 assert(reg_list);
1079
1080 const uint32_t core_regs_size = arc->num_core_regs * sizeof(uint32_t);
1081 const uint32_t aux_regs_size = arc->num_aux_regs * sizeof(uint32_t);
1082 uint32_t *core_values = malloc(core_regs_size);
1083 uint32_t *aux_values = malloc(aux_regs_size);
1084 uint32_t *core_addrs = malloc(core_regs_size);
1085 uint32_t *aux_addrs = malloc(aux_regs_size);
1086 unsigned int core_cnt = 0;
1087 unsigned int aux_cnt = 0;
1088
1089 if (!core_values || !core_addrs || !aux_values || !aux_addrs) {
1090 LOG_ERROR("Unable to allocate memory");
1091 retval = ERROR_FAIL;
1092 goto exit;
1093 }
1094
1095 memset(core_values, 0xff, core_regs_size);
1096 memset(core_addrs, 0xff, core_regs_size);
1097 memset(aux_values, 0xff, aux_regs_size);
1098 memset(aux_addrs, 0xff, aux_regs_size);
1099
1100 for (i = 0; i < arc->num_core_regs; i++) {
1101 struct reg *reg = &(reg_list[i]);
1102 struct arc_reg_desc *arc_reg = reg->arch_info;
1103 if (reg->valid && reg->exist && reg->dirty) {
1104 LOG_DEBUG("Will write regnum=%u", i);
1105 core_addrs[core_cnt] = arc_reg->arch_num;
1106 core_values[core_cnt] = target_buffer_get_u32(target, reg->value);
1107 core_cnt += 1;
1108 }
1109 }
1110
1111 for (i = 0; i < arc->num_aux_regs; i++) {
1112 struct reg *reg = &(reg_list[arc->num_core_regs + i]);
1113 struct arc_reg_desc *arc_reg = reg->arch_info;
1114 if (reg->valid && reg->exist && reg->dirty) {
1115 LOG_DEBUG("Will write regnum=%lu", arc->num_core_regs + i);
1116 aux_addrs[aux_cnt] = arc_reg->arch_num;
1117 aux_values[aux_cnt] = target_buffer_get_u32(target, reg->value);
1118 aux_cnt += 1;
1119 }
1120 }
1121
1122 /* Write data to target.
1123 * Check before write, if aux and core count is greater than 0. */
1124 if (core_cnt > 0) {
1125 retval = arc_jtag_write_core_reg(&arc->jtag_info, core_addrs, core_cnt, core_values);
1126 if (ERROR_OK != retval) {
1127 LOG_ERROR("Attempt to write to core registers failed.");
1128 retval = ERROR_FAIL;
1129 goto exit;
1130 }
1131 }
1132
1133 if (aux_cnt > 0) {
1134 retval = arc_jtag_write_aux_reg(&arc->jtag_info, aux_addrs, aux_cnt, aux_values);
1135 if (ERROR_OK != retval) {
1136 LOG_ERROR("Attempt to write to aux registers failed.");
1137 retval = ERROR_FAIL;
1138 goto exit;
1139 }
1140 }
1141
1142 exit:
1143 free(core_values);
1144 free(core_addrs);
1145 free(aux_values);
1146 free(aux_addrs);
1147
1148 return retval;
1149 }
1150
1151 static int arc_enable_interrupts(struct target *target, int enable)
1152 {
1153 uint32_t value;
1154
1155 struct arc_common *arc = target_to_arc(target);
1156
1157 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, &value));
1158
1159 if (enable) {
1160 /* enable interrupts */
1161 value |= SET_CORE_ENABLE_INTERRUPTS;
1162 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, value));
1163 LOG_DEBUG("interrupts enabled");
1164 } else {
1165 /* disable interrupts */
1166 value &= ~SET_CORE_ENABLE_INTERRUPTS;
1167 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, value));
1168 LOG_DEBUG("interrupts disabled");
1169 }
1170
1171 return ERROR_OK;
1172 }
1173
1174 static int arc_resume(struct target *target, int current, target_addr_t address,
1175 int handle_breakpoints, int debug_execution)
1176 {
1177 struct arc_common *arc = target_to_arc(target);
1178 uint32_t resume_pc = 0;
1179 uint32_t value;
1180 struct reg *pc = &arc->core_and_aux_cache->reg_list[arc->pc_index_in_cache];
1181
1182 LOG_DEBUG("current:%i, address:0x%08" TARGET_PRIxADDR ", handle_breakpoints(not supported yet):%i,"
1183 " debug_execution:%i", current, address, handle_breakpoints, debug_execution);
1184
1185 /* We need to reset the ARC cache state variables so the caches
1186 * will be invalidated and actual data
1187 * will be fetched from memory. */
1188 CHECK_RETVAL(arc_reset_caches_states(target));
1189
1190 if (target->state != TARGET_HALTED) {
1191 LOG_WARNING("target not halted");
1192 return ERROR_TARGET_NOT_HALTED;
1193 }
1194
1195 /* current = 1: continue on current PC, otherwise continue at <address> */
1196 if (!current) {
1197 target_buffer_set_u32(target, pc->value, address);
1198 pc->dirty = 1;
1199 pc->valid = 1;
1200 LOG_DEBUG("Changing the value of current PC to 0x%08" TARGET_PRIxADDR, address);
1201 }
1202
1203 if (!current)
1204 resume_pc = address;
1205 else
1206 resume_pc = target_buffer_get_u32(target, pc->value);
1207
1208 CHECK_RETVAL(arc_restore_context(target));
1209
1210 LOG_DEBUG("Target resumes from PC=0x%" PRIx32 ", pc.dirty=%i, pc.valid=%i",
1211 resume_pc, pc->dirty, pc->valid);
1212
1213 /* check if GDB tells to set our PC where to continue from */
1214 if ((pc->valid == 1) && (resume_pc == target_buffer_get_u32(target, pc->value))) {
1215 value = target_buffer_get_u32(target, pc->value);
1216 LOG_DEBUG("resume Core (when start-core) with PC @:0x%08" PRIx32, value);
1217 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_PC_REG, value));
1218 }
1219
1220 /* Restore IRQ state if not in debug_execution. */
1221 if (!debug_execution)
1222 CHECK_RETVAL(arc_enable_interrupts(target, arc->irq_state));
1223 else
1224 CHECK_RETVAL(arc_enable_interrupts(target, !debug_execution));
1225
1226 target->debug_reason = DBG_REASON_NOTHALTED;
1227
1228 /* ready to get us going again */
1229 target->state = TARGET_RUNNING;
1230 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, &value));
1231 value &= ~SET_CORE_HALT_BIT; /* clear the HALT bit */
1232 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, value));
1233 LOG_DEBUG("Core started to run");
1234
1235 /* registers are now invalid */
1236 register_cache_invalidate(arc->core_and_aux_cache);
1237
1238 if (!debug_execution) {
1239 target->state = TARGET_RUNNING;
1240 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
1241 LOG_DEBUG("target resumed at 0x%08" PRIx32, resume_pc);
1242 } else {
1243 target->state = TARGET_DEBUG_RUNNING;
1244 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED));
1245 LOG_DEBUG("target debug resumed at 0x%08" PRIx32, resume_pc);
1246 }
1247
1248 return ERROR_OK;
1249 }
1250
1251 static int arc_init_target(struct command_context *cmd_ctx, struct target *target)
1252 {
1253 CHECK_RETVAL(arc_build_reg_cache(target));
1254 CHECK_RETVAL(arc_build_bcr_reg_cache(target));
1255 target->debug_reason = DBG_REASON_DBGRQ;
1256 return ERROR_OK;
1257 }
1258
1259 static void arc_free_reg_cache(struct reg_cache *cache)
1260 {
1261 free(cache->reg_list);
1262 free(cache);
1263 }
1264
1265 static void arc_deinit_target(struct target *target)
1266 {
1267 struct arc_common *arc = target_to_arc(target);
1268
1269 LOG_DEBUG("deinitialization of target");
1270 if (arc->core_aux_cache_built)
1271 arc_free_reg_cache(arc->core_and_aux_cache);
1272 if (arc->bcr_cache_built)
1273 arc_free_reg_cache(arc->bcr_cache);
1274
1275 struct arc_reg_data_type *type, *n;
1276 struct arc_reg_desc *desc, *k;
1277
1278 /* Free arc-specific reg_data_types allocations*/
1279 list_for_each_entry_safe_reverse(type, n, &arc->reg_data_types, list) {
1280 if (type->data_type.type_class == REG_TYPE_CLASS_STRUCT) {
1281 free(type->reg_type_struct_field);
1282 free(type->bitfields);
1283 free(type);
1284 } else if (type->data_type.type_class == REG_TYPE_CLASS_FLAGS) {
1285 free(type->reg_type_flags_field);
1286 free(type->bitfields);
1287 free(type);
1288 }
1289 }
1290
1291 /* Free standard_gdb_types reg_data_types allocations */
1292 type = list_first_entry(&arc->reg_data_types, struct arc_reg_data_type, list);
1293 free(type);
1294
1295 list_for_each_entry_safe(desc, k, &arc->aux_reg_descriptions, list)
1296 free_reg_desc(desc);
1297
1298 list_for_each_entry_safe(desc, k, &arc->core_reg_descriptions, list)
1299 free_reg_desc(desc);
1300
1301 list_for_each_entry_safe(desc, k, &arc->bcr_reg_descriptions, list)
1302 free_reg_desc(desc);
1303
1304 free(arc);
1305 }
1306
1307
1308 static int arc_target_create(struct target *target, Jim_Interp *interp)
1309 {
1310 struct arc_common *arc = calloc(1, sizeof(*arc));
1311
1312 if (!arc) {
1313 LOG_ERROR("Unable to allocate memory");
1314 return ERROR_FAIL;
1315 }
1316
1317 LOG_DEBUG("Entering");
1318 CHECK_RETVAL(arc_init_arch_info(target, arc, target->tap));
1319
1320 return ERROR_OK;
1321 }
1322
1323 /**
1324 * Write 4-byte instruction to memory. This is like target_write_u32, however
1325 * in case of a little-endian ARC, instructions are in middle-endian format, not
1326 * little-endian, so a different type of conversion should be done.
1327 * Middle endian: instruction "aabbccdd" is stored as "bbaaddcc".
1328 */
1329 int arc_write_instruction_u32(struct target *target, uint32_t address,
1330 uint32_t instr)
1331 {
1332 uint8_t value_buf[4];
1333 if (!target_was_examined(target)) {
1334 LOG_ERROR("Target not examined yet");
1335 return ERROR_FAIL;
1336 }
1337
1338 LOG_DEBUG("Address: 0x%08" PRIx32 ", value: 0x%08" PRIx32, address,
1339 instr);
1340
1341 if (target->endianness == TARGET_LITTLE_ENDIAN)
1342 arc_h_u32_to_me(value_buf, instr);
1343 else
1344 h_u32_to_be(value_buf, instr);
1345
1346 CHECK_RETVAL(target_write_buffer(target, address, 4, value_buf));
1347
1348 return ERROR_OK;
1349 }
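
/* Byte-layout sketch of the middle-endian conversion (assumed equivalent of
 * arc_h_u32_to_me(), shown only for illustration): for instr = 0xaabbccdd the
 * buffer ends up as { 0xbb, 0xaa, 0xdd, 0xcc }, i.e.
 *
 *   buf[0] = (uint8_t)(instr >> 16);
 *   buf[1] = (uint8_t)(instr >> 24);
 *   buf[2] = (uint8_t)(instr >> 0);
 *   buf[3] = (uint8_t)(instr >> 8);
 */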
1350
1351 /**
1352 * Read a 32-bit instruction from memory. It is like target_read_u32, however in
1353 * case of a little-endian ARC, instructions are in middle-endian format, so a
1354 * different type of conversion should be done.
1355 */
1356 int arc_read_instruction_u32(struct target *target, uint32_t address,
1357 uint32_t *value)
1358 {
1359 uint8_t value_buf[4];
1360
1361 if (!target_was_examined(target)) {
1362 LOG_ERROR("Target not examined yet");
1363 return ERROR_FAIL;
1364 }
1365
1366 *value = 0;
1367 CHECK_RETVAL(target_read_buffer(target, address, 4, value_buf));
1368
1369 if (target->endianness == TARGET_LITTLE_ENDIAN)
1370 *value = arc_me_to_h_u32(value_buf);
1371 else
1372 *value = be_to_h_u32(value_buf);
1373
1374 LOG_DEBUG("Address: 0x%08" PRIx32 ", value: 0x%08" PRIx32, address,
1375 *value);
1376
1377 return ERROR_OK;
1378 }
1379
1380 static int arc_set_breakpoint(struct target *target,
1381 struct breakpoint *breakpoint)
1382 {
1383
1384 if (breakpoint->set) {
1385 LOG_WARNING("breakpoint already set");
1386 return ERROR_OK;
1387 }
1388
1389 if (breakpoint->type == BKPT_SOFT) {
1390 LOG_DEBUG("bpid: %" PRIu32, breakpoint->unique_id);
1391
1392 if (breakpoint->length == 4) {
1393 uint32_t verify = 0xffffffff;
1394
1395 CHECK_RETVAL(target_read_buffer(target, breakpoint->address, breakpoint->length,
1396 breakpoint->orig_instr));
1397
1398 CHECK_RETVAL(arc_write_instruction_u32(target, breakpoint->address,
1399 ARC_SDBBP_32));
1400
1401 CHECK_RETVAL(arc_read_instruction_u32(target, breakpoint->address, &verify));
1402
1403 if (verify != ARC_SDBBP_32) {
1404 LOG_ERROR("Unable to set 32bit breakpoint at address @0x%" TARGET_PRIxADDR
1405 " - check that memory is read/writable", breakpoint->address);
1406 return ERROR_FAIL;
1407 }
1408 } else if (breakpoint->length == 2) {
1409 uint16_t verify = 0xffff;
1410
1411 CHECK_RETVAL(target_read_buffer(target, breakpoint->address, breakpoint->length,
1412 breakpoint->orig_instr));
1413 CHECK_RETVAL(target_write_u16(target, breakpoint->address, ARC_SDBBP_16));
1414
1415 CHECK_RETVAL(target_read_u16(target, breakpoint->address, &verify));
1416 if (verify != ARC_SDBBP_16) {
1417 LOG_ERROR("Unable to set 16bit breakpoint at address @0x%" TARGET_PRIxADDR
1418 " - check that memory is read/writable", breakpoint->address);
1419 return ERROR_FAIL;
1420 }
1421 } else {
1422 LOG_ERROR("Invalid breakpoint length: target supports only 2 or 4");
1423 return ERROR_COMMAND_ARGUMENT_INVALID;
1424 }
1425
1426 breakpoint->set = 64; /* Any nice value but 0 */
1427 } else if (breakpoint->type == BKPT_HARD) {
1428 LOG_DEBUG("Hardware breakpoints are not supported yet!");
1429 return ERROR_FAIL;
1430 } else {
1431 LOG_DEBUG("ERROR: setting unknown breakpoint type");
1432 return ERROR_FAIL;
1433 }
1434
1435 /* core instruction cache is now invalid. */
1436 CHECK_RETVAL(arc_cache_invalidate(target));
1437
1438 return ERROR_OK;
1439 }
1440
1441 static int arc_unset_breakpoint(struct target *target,
1442 struct breakpoint *breakpoint)
1443 {
1444 int retval = ERROR_OK;
1445
1446 if (!breakpoint->set) {
1447 LOG_WARNING("breakpoint not set");
1448 return ERROR_OK;
1449 }
1450
1451 if (breakpoint->type == BKPT_SOFT) {
1452 /* restore original instruction (kept in target endianness) */
1453 LOG_DEBUG("bpid: %" PRIu32, breakpoint->unique_id);
1454 if (breakpoint->length == 4) {
1455 uint32_t current_instr;
1456
1457 /* check that user program has not modified breakpoint instruction */
1458 CHECK_RETVAL(arc_read_instruction_u32(target, breakpoint->address, &current_instr));
1459
1460 if (current_instr == ARC_SDBBP_32) {
1461 retval = target_write_buffer(target, breakpoint->address,
1462 breakpoint->length, breakpoint->orig_instr);
1463 if (retval != ERROR_OK)
1464 return retval;
1465 } else {
1466 LOG_WARNING("Software breakpoint @0x%" TARGET_PRIxADDR
1467 " has been overwritten outside of debugger."
1468 "Expected: @0x%" PRIx32 ", got: @0x%" PRIx32,
1469 breakpoint->address, ARC_SDBBP_32, current_instr);
1470 }
1471 } else if (breakpoint->length == 2) {
1472 uint16_t current_instr;
1473
1474 /* check that user program has not modified breakpoint instruction */
1475 CHECK_RETVAL(target_read_u16(target, breakpoint->address, &current_instr));
1476 if (current_instr == ARC_SDBBP_16) {
1477 retval = target_write_buffer(target, breakpoint->address,
1478 breakpoint->length, breakpoint->orig_instr);
1479 if (retval != ERROR_OK)
1480 return retval;
1481 } else {
1482 LOG_WARNING("Software breakpoint @0x%" TARGET_PRIxADDR
1483 " has been overwritten outside of debugger. "
1484 "Expected: 0x%04x, got: 0x%04" PRIx16,
1485 breakpoint->address, ARC_SDBBP_16, current_instr);
1486 }
1487 } else {
1488 LOG_ERROR("Invalid breakpoint length: target supports only 2 or 4");
1489 return ERROR_COMMAND_ARGUMENT_INVALID;
1490 }
1491 breakpoint->set = 0;
1492
1493 } else if (breakpoint->type == BKPT_HARD) {
1494 LOG_WARNING("Hardware breakpoints are not supported yet!");
1495 return ERROR_FAIL;
1496 } else {
1497 LOG_DEBUG("ERROR: unsetting unknown breakpoint type");
1498 return ERROR_FAIL;
1499 }
1500
1501 /* core instruction cache is now invalid. */
1502 CHECK_RETVAL(arc_cache_invalidate(target));
1503
1504 return retval;
1505 }
1506
1507
1508 static int arc_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1509 {
1510 if (target->state == TARGET_HALTED) {
1511 return arc_set_breakpoint(target, breakpoint);
1512
1513 } else {
1514 LOG_WARNING(" > core was not halted, please try again.");
1515 return ERROR_TARGET_NOT_HALTED;
1516 }
1517 }
1518
1519 static int arc_remove_breakpoint(struct target *target,
1520 struct breakpoint *breakpoint)
1521 {
1522 if (target->state == TARGET_HALTED) {
1523 if (breakpoint->set)
1524 CHECK_RETVAL(arc_unset_breakpoint(target, breakpoint));
1525 } else {
1526 LOG_WARNING("target not halted");
1527 return ERROR_TARGET_NOT_HALTED;
1528 }
1529
1530 return ERROR_OK;
1531 }
1532
1533 /* Helper function which switches the core to single-step mode by
1534 * doing aux r/w operations. */
1535 int arc_config_step(struct target *target, int enable_step)
1536 {
1537 uint32_t value;
1538
1539 struct arc_common *arc = target_to_arc(target);
1540
1541 /* enable core debug step mode */
1542 if (enable_step) {
1543 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG,
1544 &value));
1545 value &= ~SET_CORE_AE_BIT; /* clear the AE bit */
1546 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG,
1547 value));
1548 LOG_DEBUG(" [status32:0x%08" PRIx32 "]", value);
1549
1550 /* Doing read-modify-write, because DEBUG might contain manually set
1551 * bits like UB or ED, which should be preserved. */
1552 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info,
1553 AUX_DEBUG_REG, &value));
1554 value |= SET_CORE_SINGLE_INSTR_STEP; /* set the IS bit */
1555 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DEBUG_REG,
1556 value));
1557 LOG_DEBUG("core debug step mode enabled [debug-reg:0x%08" PRIx32 "]", value);
1558
1559 } else { /* disable core debug step mode */
1560 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_DEBUG_REG,
1561 &value));
1562 value &= ~SET_CORE_SINGLE_INSTR_STEP; /* clear the IS bit */
1563 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DEBUG_REG,
1564 value));
1565 LOG_DEBUG("core debug step mode disabled");
1566 }
1567
1568 return ERROR_OK;
1569 }
1570
1571 int arc_step(struct target *target, int current, target_addr_t address,
1572 int handle_breakpoints)
1573 {
1574 /* get pointers to arch-specific information */
1575 struct arc_common *arc = target_to_arc(target);
1576 struct breakpoint *breakpoint = NULL;
1577 struct reg *pc = &(arc->core_and_aux_cache->reg_list[arc->pc_index_in_cache]);
1578
1579 if (target->state != TARGET_HALTED) {
1580 LOG_WARNING("target not halted");
1581 return ERROR_TARGET_NOT_HALTED;
1582 }
1583
1584 /* current = 1: continue on current pc, otherwise continue at <address> */
1585 if (!current) {
1586 buf_set_u32(pc->value, 0, 32, address);
1587 pc->dirty = 1;
1588 pc->valid = 1;
1589 }
1590
1591 LOG_DEBUG("Target steps one instruction from PC=0x%" PRIx32,
1592 buf_get_u32(pc->value, 0, 32));
1593
1594 /* the front-end may request us not to handle breakpoints */
1595 if (handle_breakpoints) {
1596 breakpoint = breakpoint_find(target, buf_get_u32(pc->value, 0, 32));
1597 if (breakpoint)
1598 CHECK_RETVAL(arc_unset_breakpoint(target, breakpoint));
1599 }
1600
1601 /* restore context */
1602 CHECK_RETVAL(arc_restore_context(target));
1603
1604 target->debug_reason = DBG_REASON_SINGLESTEP;
1605
1606 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
1607
1608 /* disable interrupts while stepping */
1609 CHECK_RETVAL(arc_enable_interrupts(target, 0));
1610
1611 /* do a single step */
1612 CHECK_RETVAL(arc_config_step(target, 1));
1613
1614 /* make sure we have done our step */
1615 alive_sleep(1);
1616
1617 /* registers are now invalid */
1618 register_cache_invalidate(arc->core_and_aux_cache);
1619
1620 if (breakpoint)
1621 CHECK_RETVAL(arc_set_breakpoint(target, breakpoint));
1622
1623 LOG_DEBUG("target stepped ");
1624
1625 target->state = TARGET_HALTED;
1626
1627 /* Saving context */
1628 CHECK_RETVAL(arc_debug_entry(target));
1629 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
1630
1631 return ERROR_OK;
1632 }
1633
1634
1635 /* This function invalidates icache. */
1636 static int arc_icache_invalidate(struct target *target)
1637 {
1638 uint32_t value;
1639
1640 struct arc_common *arc = target_to_arc(target);
1641
1642 /* Don't waste time if already done. */
1643 if (!arc->has_icache || arc->icache_invalidated)
1644 return ERROR_OK;
1645
1646 LOG_DEBUG("Invalidating I$.");
1647
1648 value = IC_IVIC_INVALIDATE; /* invalidate I$ */
1649 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_IC_IVIC_REG, value));
1650
1651 arc->icache_invalidated = true;
1652
1653 return ERROR_OK;
1654 }
1655
1656 /* This function invalidates dcache */
1657 static int arc_dcache_invalidate(struct target *target)
1658 {
1659 uint32_t value, dc_ctrl_value;
1660
1661 struct arc_common *arc = target_to_arc(target);
1662
1663 if (!arc->has_dcache || arc->dcache_invalidated)
1664 return ERROR_OK;
1665
1666 LOG_DEBUG("Invalidating D$.");
1667
1668 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, &value));
1669 dc_ctrl_value = value;
1670 value &= ~DC_CTRL_IM;
1671
1672 /* set DC_CTRL invalidate mode to invalidate-only (no flushing!!) */
1673 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, value));
1674 value = DC_IVDC_INVALIDATE; /* invalidate D$ */
1675 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_IVDC_REG, value));
1676
1677 /* restore DC_CTRL invalidate mode */
1678 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, dc_ctrl_value));
1679
1680 arc->dcache_invalidated = true;
1681
1682 return ERROR_OK;
1683 }
1684
1685 /* This function invalidates l2 cache. */
1686 static int arc_l2cache_invalidate(struct target *target)
1687 {
1688 uint32_t value, slc_ctrl_value;
1689
1690 struct arc_common *arc = target_to_arc(target);
1691
1692 if (!arc->has_l2cache || arc->l2cache_invalidated)
1693 return ERROR_OK;
1694
1695 LOG_DEBUG("Invalidating L2$.");
1696
1697 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_CTRL, &value));
1698 slc_ctrl_value = value;
1699 value &= ~L2_CTRL_IM;
1700
1701 /* set L2_CTRL invalidate mode to invalidate-only (no flushing!!) */
1702 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_CTRL, value));
1703 /* invalidate L2$ */
1704 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_INV, L2_INV_IV));
1705
1706 /* Wait until invalidate operation ends */
1707 do {
1708 LOG_DEBUG("Waiting for invalidation end.");
1709 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_CTRL, &value));
1710 } while (value & L2_CTRL_BS);
1711
1712 /* restore L2_CTRL invalidate mode */
1713 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_CTRL, slc_ctrl_value));
1714
1715 arc->l2cache_invalidated = true;
1716
1717 return ERROR_OK;
1718 }
1719
1720
1721 int arc_cache_invalidate(struct target *target)
1722 {
1723 CHECK_RETVAL(arc_icache_invalidate(target));
1724 CHECK_RETVAL(arc_dcache_invalidate(target));
1725 CHECK_RETVAL(arc_l2cache_invalidate(target));
1726
1727 return ERROR_OK;
1728 }
1729
1730 /* Flush the data cache. This function is cheap to call and returns quickly if D$
1731 * has already been flushed since the target was halted. The JTAG debugger reads
1732 * values directly from memory, bypassing the cache, so if there are unflushed
1733 * lines the debugger will read stale values, which will cause a lot of trouble.
1734 */
1735 int arc_dcache_flush(struct target *target)
1736 {
1737 uint32_t value, dc_ctrl_value;
1738 bool has_to_set_dc_ctrl_im;
1739
1740 struct arc_common *arc = target_to_arc(target);
1741
1742 /* Don't waste time if already done. */
1743 if (!arc->has_dcache || arc->dcache_flushed)
1744 return ERROR_OK;
1745
1746 LOG_DEBUG("Flushing D$.");
1747
1748 /* Store current value of DC_CTRL */
1749 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, &dc_ctrl_value));
1750
1751 /* Set DC_CTRL invalidate mode to flush (if not already set) */
1752 has_to_set_dc_ctrl_im = (dc_ctrl_value & DC_CTRL_IM) == 0;
1753 if (has_to_set_dc_ctrl_im) {
1754 value = dc_ctrl_value | DC_CTRL_IM;
1755 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, value));
1756 }
1757
1758 /* Flush D$ */
1759 value = DC_IVDC_INVALIDATE;
1760 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_IVDC_REG, value));
1761
1762 /* Restore DC_CTRL invalidate mode (even if flush failed) */
1763 if (has_to_set_dc_ctrl_im)
1764 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, dc_ctrl_value));
1765
1766 arc->dcache_flushed = true;
1767
1768 return ERROR_OK;
1769 }
1770
1771 /* This function flushes l2cache. */
1772 static int arc_l2cache_flush(struct target *target)
1773 {
1774 uint32_t value;
1775
1776 struct arc_common *arc = target_to_arc(target);
1777
1778 /* Don't waste time if already done. */
1779 if (!arc->has_l2cache || arc->l2cache_flushed)
1780 return ERROR_OK;
1781
1782 LOG_DEBUG("Flushing L2$.");
1783
1784 /* Flush L2 cache */
1785 CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_FLUSH, L2_FLUSH_FL));
1786
1787 /* Wait until flush operation ends */
1788 do {
1789 LOG_DEBUG("Waiting for flushing end.");
1790 CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_CTRL, &value));
1791 } while (value & L2_CTRL_BS);
1792
1793 arc->l2cache_flushed = true;
1794
1795 return ERROR_OK;
1796 }
1797
1798 int arc_cache_flush(struct target *target)
1799 {
1800 CHECK_RETVAL(arc_dcache_flush(target));
1801 CHECK_RETVAL(arc_l2cache_flush(target));
1802
1803 return ERROR_OK;
1804 }
1805
1806 /* ARC v2 target */
1807 struct target_type arcv2_target = {
1808 .name = "arcv2",
1809
1810 .poll = arc_poll,
1811
1812 .arch_state = arc_arch_state,
1813
1814 /* TODO That seems like something similar to MetaWare hostlink, so perhaps
1815 * we can exploit this in the future. */
1816 .target_request_data = NULL,
1817
1818 .halt = arc_halt,
1819 .resume = arc_resume,
1820 .step = arc_step,
1821
1822 .assert_reset = arc_assert_reset,
1823 .deassert_reset = arc_deassert_reset,
1824
1825 /* TODO Implement soft_reset_halt */
1826 .soft_reset_halt = NULL,
1827
1828 .get_gdb_reg_list = arc_get_gdb_reg_list,
1829
1830 .read_memory = arc_mem_read,
1831 .write_memory = arc_mem_write,
1832 .checksum_memory = NULL,
1833 .blank_check_memory = NULL,
1834
1835 .add_breakpoint = arc_add_breakpoint,
1836 .add_context_breakpoint = NULL,
1837 .add_hybrid_breakpoint = NULL,
1838 .remove_breakpoint = arc_remove_breakpoint,
1839 .add_watchpoint = NULL,
1840 .remove_watchpoint = NULL,
1841 .hit_watchpoint = NULL,
1842
1843 .run_algorithm = NULL,
1844 .start_algorithm = NULL,
1845 .wait_algorithm = NULL,
1846
1847 .commands = arc_monitor_command_handlers,
1848
1849 .target_create = arc_target_create,
1850 .init_target = arc_init_target,
1851 .deinit_target = arc_deinit_target,
1852 .examine = arc_examine,
1853
1854 .virt2phys = NULL,
1855 .read_phys_memory = NULL,
1856 .write_phys_memory = NULL,
1857 .mmu = NULL,
1858 };
