1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2006 by Magnus Lundin *
5 * lundin@mlu.mine.nu *
6 * *
7 * Copyright (C) 2008 by Spencer Oliver *
8 * spen@spen-soft.co.uk *
9 * *
10 * Copyright (C) 2009-2010 by Oyvind Harboe *
11 * oyvind.harboe@zylin.com *
12 * *
13 * Copyright (C) 2009-2010 by David Brownell *
14 * *
15 * Copyright (C) 2013 by Andreas Fritiofson *
16 * andreas.fritiofson@gmail.com *
17 * *
18 * Copyright (C) 2019-2021, Ampere Computing LLC *
19 ***************************************************************************/
20
21 /**
22 * @file
23 * This file implements support for the ARM Debug Interface version 5 (ADIv5)
24 * debugging architecture. Compared with previous versions, this includes
25 * a low pin-count Serial Wire Debug (SWD) alternative to JTAG for message
26 * transport, and focuses on memory mapped resources as defined by the
27 * CoreSight architecture.
28 *
29 * A key concept in ADIv5 is the Debug Access Port, or DAP. A DAP has two
30 * basic components: a Debug Port (DP) transporting messages to and from a
31 * debugger, and an Access Port (AP) accessing resources. Three types of DP
32 * are defined. One uses only JTAG for communication, and is called JTAG-DP.
33 * One uses only SWD for communication, and is called SW-DP. The third can
34 * use either SWD or JTAG, and is called SWJ-DP. The most common type of AP
35 * is used to access memory mapped resources and is called a MEM-AP. A
36 * JTAG-AP is also defined, bridging to JTAG resources; those are uncommon.
37 *
38 * This programming interface allows DAP pipelined operations through a
39 * transaction queue. This primarily affects AP operations (such as using
40 * a MEM-AP to access memory or registers). If the current transaction has
41 * not finished by the time the next one must begin, and the ORUNDETECT bit
42 * is set in the DP_CTRL_STAT register, the SSTICKYORUN status is set and
43 * further AP operations will fail. There are two basic methods to avoid
44 * such overrun errors. One involves polling for status instead of using
45 * transaction pipelining. The other involves adding delays to ensure the
46 * AP has enough time to complete one operation before starting the next
47 * one. (For JTAG these delays are controlled by memaccess_tck.)
48 */
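/*
 * Illustrative sketch of the queued model described above (not part of the
 * driver API): AP reads are merely queued here and the values become valid
 * only after the queue is flushed with dap_run(). 'ap' and 'base' are
 * assumed to be a configured MEM-AP and a readable word-aligned address.
 *
 *	uint32_t r0, r1;
 *	int retval = mem_ap_read_u32(ap, base + 0x0, &r0);
 *	if (retval == ERROR_OK)
 *		retval = mem_ap_read_u32(ap, base + 0x4, &r1);
 *	if (retval == ERROR_OK)
 *		retval = dap_run(ap->dap);
 */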
49
50 /*
51 * Relevant specifications from ARM include:
52 *
53 * ARM(tm) Debug Interface v5 Architecture Specification ARM IHI 0031E
54 * CoreSight(tm) v1.0 Architecture Specification ARM IHI 0029B
55 *
56 * CoreSight(tm) DAP-Lite TRM, ARM DDI 0316D
57 * Cortex-M3(tm) TRM, ARM DDI 0337G
58 */
59
60 #ifdef HAVE_CONFIG_H
61 #include "config.h"
62 #endif
63
64 #include "jtag/interface.h"
65 #include "arm.h"
66 #include "arm_adi_v5.h"
67 #include "arm_coresight.h"
68 #include "jtag/swd.h"
69 #include "transport/transport.h"
70 #include <helper/align.h>
71 #include <helper/jep106.h>
72 #include <helper/time_support.h>
73 #include <helper/list.h>
74 #include <helper/jim-nvp.h>
75
76 /* ARM ADI Specification requires at least 10 bits used for TAR autoincrement */
77
78 /*
79  * max_tar_block_size(tar_autoincr_block, address):
80  * Return the largest block starting at address that does not cross a TAR
81  * auto-increment alignment boundary.
82  */
82 static uint32_t max_tar_block_size(uint32_t tar_autoincr_block, target_addr_t address)
83 {
84 return tar_autoincr_block - ((tar_autoincr_block - 1) & address);
85 }
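/*
 * Worked example (illustrative): with tar_autoincr_block = 0x400 and
 * address = 0x3f8, ((0x400 - 1) & 0x3f8) = 0x3f8, so the function returns
 * 0x400 - 0x3f8 = 8, i.e. only 8 bytes remain before the next 1 KiB
 * auto-increment boundary.
 */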
86
87 /***************************************************************************
88 * *
89 * DP and MEM-AP register access through APACC and DPACC *
90 * *
91 ***************************************************************************/
92
93 static int mem_ap_setup_csw(struct adiv5_ap *ap, uint32_t csw)
94 {
95 csw |= ap->csw_default;
96
97 if (csw != ap->csw_value) {
98 /* LOG_DEBUG("DAP: Set CSW %x",csw); */
99 int retval = dap_queue_ap_write(ap, MEM_AP_REG_CSW(ap->dap), csw);
100 if (retval != ERROR_OK) {
101 ap->csw_value = 0;
102 return retval;
103 }
104 ap->csw_value = csw;
105 }
106 return ERROR_OK;
107 }
108
109 static int mem_ap_setup_tar(struct adiv5_ap *ap, target_addr_t tar)
110 {
111 if (!ap->tar_valid || tar != ap->tar_value) {
112 /* LOG_DEBUG("DAP: Set TAR %x",tar); */
113 int retval = dap_queue_ap_write(ap, MEM_AP_REG_TAR(ap->dap), (uint32_t)(tar & 0xffffffffUL));
114 if (retval == ERROR_OK && is_64bit_ap(ap)) {
115 /* See if bits 63:32 of tar are different from the last setting */
116 if ((ap->tar_value >> 32) != (tar >> 32))
117 retval = dap_queue_ap_write(ap, MEM_AP_REG_TAR64(ap->dap), (uint32_t)(tar >> 32));
118 }
119 if (retval != ERROR_OK) {
120 ap->tar_valid = false;
121 return retval;
122 }
123 ap->tar_value = tar;
124 ap->tar_valid = true;
125 }
126 return ERROR_OK;
127 }
128
129 static int mem_ap_read_tar(struct adiv5_ap *ap, target_addr_t *tar)
130 {
131 uint32_t lower;
132 uint32_t upper = 0;
133
134 int retval = dap_queue_ap_read(ap, MEM_AP_REG_TAR(ap->dap), &lower);
135 if (retval == ERROR_OK && is_64bit_ap(ap))
136 retval = dap_queue_ap_read(ap, MEM_AP_REG_TAR64(ap->dap), &upper);
137
138 if (retval != ERROR_OK) {
139 ap->tar_valid = false;
140 return retval;
141 }
142
143 retval = dap_run(ap->dap);
144 if (retval != ERROR_OK) {
145 ap->tar_valid = false;
146 return retval;
147 }
148
149 *tar = (((target_addr_t)upper) << 32) | (target_addr_t)lower;
150
151 ap->tar_value = *tar;
152 ap->tar_valid = true;
153 return ERROR_OK;
154 }
155
156 static uint32_t mem_ap_get_tar_increment(struct adiv5_ap *ap)
157 {
158 switch (ap->csw_value & CSW_ADDRINC_MASK) {
159 case CSW_ADDRINC_SINGLE:
160 switch (ap->csw_value & CSW_SIZE_MASK) {
161 case CSW_8BIT:
162 return 1;
163 case CSW_16BIT:
164 return 2;
165 case CSW_32BIT:
166 return 4;
167 default:
168 return 0;
169 }
170 case CSW_ADDRINC_PACKED:
171 return 4;
172 }
173 return 0;
174 }
175
176 /* mem_ap_update_tar_cache is called after an access to MEM_AP_REG_DRW
177 */
178 static void mem_ap_update_tar_cache(struct adiv5_ap *ap)
179 {
180 if (!ap->tar_valid)
181 return;
182
183 uint32_t inc = mem_ap_get_tar_increment(ap);
184 if (inc >= max_tar_block_size(ap->tar_autoincr_block, ap->tar_value))
185 ap->tar_valid = false;
186 else
187 ap->tar_value += inc;
188 }
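/*
 * Example (illustrative): with tar_autoincr_block = 0x400, a cached TAR of
 * 0x200003fc and CSW set for 32-bit single auto-increment, a DRW access
 * consumes the last 4 bytes before the 1 KiB boundary. Hardware wrap
 * behaviour there is implementation defined, so the cached TAR is
 * invalidated and rewritten on the next access rather than incremented.
 */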
189
190 /**
191 * Queue transactions setting up transfer parameters for the
192 * currently selected MEM-AP.
193 *
194 * Subsequent transfers using registers like MEM_AP_REG_DRW or MEM_AP_REG_BD2
195 * initiate data reads or writes using memory or peripheral addresses.
196 * If the CSW is configured for it, the TAR may be automatically
197 * incremented after each transfer.
198 *
199 * @param ap The MEM-AP.
200 * @param csw MEM-AP Control/Status Word (CSW) register to assign. If this
201 * matches the cached value, the register is not changed.
202 * @param tar MEM-AP Transfer Address Register (TAR) to assign. If this
203 * matches the cached address, the register is not changed.
204 *
205 * @return ERROR_OK if the transaction was properly queued, else a fault code.
206 */
207 static int mem_ap_setup_transfer(struct adiv5_ap *ap, uint32_t csw, target_addr_t tar)
208 {
209 int retval;
210 retval = mem_ap_setup_csw(ap, csw);
211 if (retval != ERROR_OK)
212 return retval;
213 retval = mem_ap_setup_tar(ap, tar);
214 if (retval != ERROR_OK)
215 return retval;
216 return ERROR_OK;
217 }
218
219 /**
220 * Asynchronous (queued) read of a word from memory or a system register.
221 *
222 * @param ap The MEM-AP to access.
223 * @param address Address of the 32-bit word to read; it must be
224 * readable by the currently selected MEM-AP.
225 * @param value points to where the word will be stored when the
226 * transaction queue is flushed (assuming no errors).
227 *
228 * @return ERROR_OK for success. Otherwise a fault code.
229 */
230 int mem_ap_read_u32(struct adiv5_ap *ap, target_addr_t address,
231 uint32_t *value)
232 {
233 int retval;
234
235 /* Use banked addressing (REG_BDx) to avoid some link traffic
236 * (updating TAR) when reading several consecutive addresses.
237 */
238 retval = mem_ap_setup_transfer(ap,
239 CSW_32BIT | (ap->csw_value & CSW_ADDRINC_MASK),
240 address & 0xFFFFFFFFFFFFFFF0ull);
241 if (retval != ERROR_OK)
242 return retval;
243
244 return dap_queue_ap_read(ap, MEM_AP_REG_BD0(ap->dap) | (address & 0xC), value);
245 }
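/*
 * Example (illustrative): for address 0x20000008 the TAR is set to the
 * 16-byte aligned base 0x20000000 and the read is issued through banked
 * register BD2 (MEM_AP_REG_BD0 | 0x8), so the four words at offsets
 * 0x0..0xc can be accessed without further TAR updates.
 */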
246
247 /**
248 * Synchronous read of a word from memory or a system register.
249 * As a side effect, this flushes any queued transactions.
250 *
251 * @param ap The MEM-AP to access.
252 * @param address Address of the 32-bit word to read; it must be
253 * readable by the currently selected MEM-AP.
254 * @param value points to where the result will be stored.
255 *
256 * @return ERROR_OK for success; *value holds the result.
257 * Otherwise a fault code.
258 */
259 int mem_ap_read_atomic_u32(struct adiv5_ap *ap, target_addr_t address,
260 uint32_t *value)
261 {
262 int retval;
263
264 retval = mem_ap_read_u32(ap, address, value);
265 if (retval != ERROR_OK)
266 return retval;
267
268 return dap_run(ap->dap);
269 }
270
271 /**
272 * Asynchronous (queued) write of a word to memory or a system register.
273 *
274 * @param ap The MEM-AP to access.
275 * @param address Address to be written; it must be writable by
276 * the currently selected MEM-AP.
277 * @param value Word that will be written to the address when transaction
278 * queue is flushed (assuming no errors).
279 *
280 * @return ERROR_OK for success. Otherwise a fault code.
281 */
282 int mem_ap_write_u32(struct adiv5_ap *ap, target_addr_t address,
283 uint32_t value)
284 {
285 int retval;
286
287 /* Use banked addressing (REG_BDx) to avoid some link traffic
288 * (updating TAR) when writing several consecutive addresses.
289 */
290 retval = mem_ap_setup_transfer(ap,
291 CSW_32BIT | (ap->csw_value & CSW_ADDRINC_MASK),
292 address & 0xFFFFFFFFFFFFFFF0ull);
293 if (retval != ERROR_OK)
294 return retval;
295
296 return dap_queue_ap_write(ap, MEM_AP_REG_BD0(ap->dap) | (address & 0xC),
297 value);
298 }
299
300 /**
301 * Synchronous write of a word to memory or a system register.
302 * As a side effect, this flushes any queued transactions.
303 *
304 * @param ap The MEM-AP to access.
305 * @param address Address to be written; it must be writable by
306 * the currently selected MEM-AP.
307 * @param value Word that will be written.
308 *
309 * @return ERROR_OK for success; the data was written. Otherwise a fault code.
310 */
311 int mem_ap_write_atomic_u32(struct adiv5_ap *ap, target_addr_t address,
312 uint32_t value)
313 {
314 int retval = mem_ap_write_u32(ap, address, value);
315
316 if (retval != ERROR_OK)
317 return retval;
318
319 return dap_run(ap->dap);
320 }
321
322 /**
323 * Synchronous write of a block of memory, using a specific access size.
324 *
325 * @param ap The MEM-AP to access.
326 * @param buffer The data buffer to write. No particular alignment is assumed.
327 * @param size Which access size to use, in bytes. 1, 2 or 4.
328 * @param count The number of writes to do (in size units, not bytes).
329 * @param address Address to be written; it must be writable by the currently selected MEM-AP.
330 * @param addrinc Whether the target address should be increased for each write or not. This
331 * should normally be true, except when writing to e.g. a FIFO.
332 * @return ERROR_OK on success, otherwise an error code.
333 */
334 static int mem_ap_write(struct adiv5_ap *ap, const uint8_t *buffer, uint32_t size, uint32_t count,
335 target_addr_t address, bool addrinc)
336 {
337 struct adiv5_dap *dap = ap->dap;
338 size_t nbytes = size * count;
339 const uint32_t csw_addrincr = addrinc ? CSW_ADDRINC_SINGLE : CSW_ADDRINC_OFF;
340 uint32_t csw_size;
341 target_addr_t addr_xor;
342 int retval = ERROR_OK;
343
344 /* TI BE-32 Quirks mode:
345 * Writes on big-endian TMS570 behave very strangely. Observed behavior:
346 * size write address bytes written in order
347 * 4 TAR ^ 0 (val >> 24), (val >> 16), (val >> 8), (val)
348 * 2 TAR ^ 2 (val >> 8), (val)
349 * 1 TAR ^ 3 (val)
350 * For example, if you attempt to write a single byte to address 0, the processor
351 * will actually write a byte to address 3.
352 *
353 * To make writes of size < 4 work as expected, we xor a value with the address before
354 * setting the TAR, and we set the TAR after every transfer rather than relying on
355 * address increment. */
356
357 if (size == 4) {
358 csw_size = CSW_32BIT;
359 addr_xor = 0;
360 } else if (size == 2) {
361 csw_size = CSW_16BIT;
362 addr_xor = dap->ti_be_32_quirks ? 2 : 0;
363 } else if (size == 1) {
364 csw_size = CSW_8BIT;
365 addr_xor = dap->ti_be_32_quirks ? 3 : 0;
366 } else {
367 return ERROR_TARGET_UNALIGNED_ACCESS;
368 }
369
370 if (ap->unaligned_access_bad && (address % size != 0))
371 return ERROR_TARGET_UNALIGNED_ACCESS;
372
373 while (nbytes > 0) {
374 uint32_t this_size = size;
375
376 /* Select packed transfer if possible */
377 if (addrinc && ap->packed_transfers && nbytes >= 4
378 && max_tar_block_size(ap->tar_autoincr_block, address) >= 4) {
379 this_size = 4;
380 retval = mem_ap_setup_csw(ap, csw_size | CSW_ADDRINC_PACKED);
381 } else {
382 retval = mem_ap_setup_csw(ap, csw_size | csw_addrincr);
383 }
384
385 if (retval != ERROR_OK)
386 break;
387
388 retval = mem_ap_setup_tar(ap, address ^ addr_xor);
389 if (retval != ERROR_OK)
390 return retval;
391
392 /* How many source bytes each transfer will consume, and their location in the DRW,
393 * depends on the type of transfer and alignment. See ARM document IHI0031C. */
394 uint32_t outvalue = 0;
395 uint32_t drw_byte_idx = address;
396 if (dap->ti_be_32_quirks) {
397 switch (this_size) {
398 case 4:
399 outvalue |= (uint32_t)*buffer++ << 8 * (3 ^ (drw_byte_idx++ & 3) ^ addr_xor);
400 outvalue |= (uint32_t)*buffer++ << 8 * (3 ^ (drw_byte_idx++ & 3) ^ addr_xor);
401 outvalue |= (uint32_t)*buffer++ << 8 * (3 ^ (drw_byte_idx++ & 3) ^ addr_xor);
402 outvalue |= (uint32_t)*buffer++ << 8 * (3 ^ (drw_byte_idx & 3) ^ addr_xor);
403 break;
404 case 2:
405 outvalue |= (uint32_t)*buffer++ << 8 * (1 ^ (drw_byte_idx++ & 3) ^ addr_xor);
406 outvalue |= (uint32_t)*buffer++ << 8 * (1 ^ (drw_byte_idx & 3) ^ addr_xor);
407 break;
408 case 1:
409 outvalue |= (uint32_t)*buffer++ << 8 * (0 ^ (drw_byte_idx & 3) ^ addr_xor);
410 break;
411 }
412 } else if (dap->nu_npcx_quirks) {
413 switch (this_size) {
414 case 4:
415 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx++ & 3);
416 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx++ & 3);
417 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx++ & 3);
418 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx & 3);
419 break;
420 case 2:
421 outvalue |= (uint32_t)*buffer << 8 * (drw_byte_idx++ & 3);
422 outvalue |= (uint32_t)*(buffer+1) << 8 * (drw_byte_idx++ & 3);
423 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx++ & 3);
424 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx & 3);
425 break;
426 case 1:
427 outvalue |= (uint32_t)*buffer << 8 * (drw_byte_idx++ & 3);
428 outvalue |= (uint32_t)*buffer << 8 * (drw_byte_idx++ & 3);
429 outvalue |= (uint32_t)*buffer << 8 * (drw_byte_idx++ & 3);
430 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx & 3);
431 }
432 } else {
433 switch (this_size) {
434 case 4:
435 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx++ & 3);
436 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx++ & 3);
437 /* fallthrough */
438 case 2:
439 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx++ & 3);
440 /* fallthrough */
441 case 1:
442 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx & 3);
443 }
444 }
445
446 nbytes -= this_size;
447
448 retval = dap_queue_ap_write(ap, MEM_AP_REG_DRW(dap), outvalue);
449 if (retval != ERROR_OK)
450 break;
451
452 mem_ap_update_tar_cache(ap);
453 if (addrinc)
454 address += this_size;
455 }
456
457 /* REVISIT: Might want to have a queued version of this function that does not call dap_run(). */
458 if (retval == ERROR_OK)
459 retval = dap_run(dap);
460
461 if (retval != ERROR_OK) {
462 target_addr_t tar;
463 if (mem_ap_read_tar(ap, &tar) == ERROR_OK)
464 LOG_ERROR("Failed to write memory at " TARGET_ADDR_FMT, tar);
465 else
466 LOG_ERROR("Failed to write memory and, additionally, failed to find out where");
467 }
468
469 return retval;
470 }
471
472 /**
473 * Synchronous read of a block of memory, using a specific access size.
474 *
475 * @param ap The MEM-AP to access.
476 * @param buffer The data buffer to receive the data. No particular alignment is assumed.
477 * @param size Which access size to use, in bytes. 1, 2 or 4.
478 * @param count The number of reads to do (in size units, not bytes).
479 * @param adr Address to be read; it must be readable by the currently selected MEM-AP.
480 * @param addrinc Whether the target address should be increased after each read or not. This
481 * should normally be true, except when reading from e.g. a FIFO.
482 * @return ERROR_OK on success, otherwise an error code.
483 */
484 static int mem_ap_read(struct adiv5_ap *ap, uint8_t *buffer, uint32_t size, uint32_t count,
485 target_addr_t adr, bool addrinc)
486 {
487 struct adiv5_dap *dap = ap->dap;
488 size_t nbytes = size * count;
489 const uint32_t csw_addrincr = addrinc ? CSW_ADDRINC_SINGLE : CSW_ADDRINC_OFF;
490 uint32_t csw_size;
491 target_addr_t address = adr;
492 int retval = ERROR_OK;
493
494 /* TI BE-32 Quirks mode:
495 * Reads on big-endian TMS570 behave strangely, and differently from writes.
496 * They read from the physical address requested, but with DRW byte-reversed.
497 * For example, a byte read from address 0 will place the result in the high bytes of DRW.
498 * Also, packed 8-bit and 16-bit transfers seem to sometimes return garbage in some bytes,
499 * so avoid them. */
500
501 if (size == 4)
502 csw_size = CSW_32BIT;
503 else if (size == 2)
504 csw_size = CSW_16BIT;
505 else if (size == 1)
506 csw_size = CSW_8BIT;
507 else
508 return ERROR_TARGET_UNALIGNED_ACCESS;
509
510 if (ap->unaligned_access_bad && (adr % size != 0))
511 return ERROR_TARGET_UNALIGNED_ACCESS;
512
513 /* Allocate buffer to hold the sequence of DRW reads that will be made. This is a significant
514 * over-allocation if packed transfers are going to be used, but determining the real need at
515 * this point would be messy. */
516 uint32_t *read_buf = calloc(count, sizeof(uint32_t));
517 /* Multiplication count * sizeof(uint32_t) may overflow, calloc() is safe */
518 uint32_t *read_ptr = read_buf;
519 if (!read_buf) {
520 LOG_ERROR("Failed to allocate read buffer");
521 return ERROR_FAIL;
522 }
523
524 /* Queue up all reads. Each read will store the entire DRW word in the read buffer. How many
525 * useful bytes it contains, and their location in the word, depends on the type of transfer
526 * and alignment. */
527 while (nbytes > 0) {
528 uint32_t this_size = size;
529
530 /* Select packed transfer if possible */
531 if (addrinc && ap->packed_transfers && nbytes >= 4
532 && max_tar_block_size(ap->tar_autoincr_block, address) >= 4) {
533 this_size = 4;
534 retval = mem_ap_setup_csw(ap, csw_size | CSW_ADDRINC_PACKED);
535 } else {
536 retval = mem_ap_setup_csw(ap, csw_size | csw_addrincr);
537 }
538 if (retval != ERROR_OK)
539 break;
540
541 retval = mem_ap_setup_tar(ap, address);
542 if (retval != ERROR_OK)
543 break;
544
545 retval = dap_queue_ap_read(ap, MEM_AP_REG_DRW(dap), read_ptr++);
546 if (retval != ERROR_OK)
547 break;
548
549 nbytes -= this_size;
550 if (addrinc)
551 address += this_size;
552
553 mem_ap_update_tar_cache(ap);
554 }
555
556 if (retval == ERROR_OK)
557 retval = dap_run(dap);
558
559 /* Restore state */
560 address = adr;
561 nbytes = size * count;
562 read_ptr = read_buf;
563
564 /* If something failed, read TAR to find out how much data was successfully read, so we can
565 * at least give the caller what we have. */
566 if (retval != ERROR_OK) {
567 target_addr_t tar;
568 if (mem_ap_read_tar(ap, &tar) == ERROR_OK) {
569 /* TAR is incremented after failed transfer on some devices (eg Cortex-M4) */
570 LOG_ERROR("Failed to read memory at " TARGET_ADDR_FMT, tar);
571 if (nbytes > tar - address)
572 nbytes = tar - address;
573 } else {
574 LOG_ERROR("Failed to read memory and, additionally, failed to find out where");
575 nbytes = 0;
576 }
577 }
578
579 /* Replay loop to populate caller's buffer from the correct word and byte lane */
580 while (nbytes > 0) {
581 uint32_t this_size = size;
582
583 if (addrinc && ap->packed_transfers && nbytes >= 4
584 && max_tar_block_size(ap->tar_autoincr_block, address) >= 4) {
585 this_size = 4;
586 }
587
588 if (dap->ti_be_32_quirks) {
589 switch (this_size) {
590 case 4:
591 *buffer++ = *read_ptr >> 8 * (3 - (address++ & 3));
592 *buffer++ = *read_ptr >> 8 * (3 - (address++ & 3));
593 /* fallthrough */
594 case 2:
595 *buffer++ = *read_ptr >> 8 * (3 - (address++ & 3));
596 /* fallthrough */
597 case 1:
598 *buffer++ = *read_ptr >> 8 * (3 - (address++ & 3));
599 }
600 } else {
601 switch (this_size) {
602 case 4:
603 *buffer++ = *read_ptr >> 8 * (address++ & 3);
604 *buffer++ = *read_ptr >> 8 * (address++ & 3);
605 /* fallthrough */
606 case 2:
607 *buffer++ = *read_ptr >> 8 * (address++ & 3);
608 /* fallthrough */
609 case 1:
610 *buffer++ = *read_ptr >> 8 * (address++ & 3);
611 }
612 }
613
614 read_ptr++;
615 nbytes -= this_size;
616 }
617
618 free(read_buf);
619 return retval;
620 }
621
622 int mem_ap_read_buf(struct adiv5_ap *ap,
623 uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
624 {
625 return mem_ap_read(ap, buffer, size, count, address, true);
626 }
627
628 int mem_ap_write_buf(struct adiv5_ap *ap,
629 const uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
630 {
631 return mem_ap_write(ap, buffer, size, count, address, true);
632 }
633
634 int mem_ap_read_buf_noincr(struct adiv5_ap *ap,
635 uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
636 {
637 return mem_ap_read(ap, buffer, size, count, address, false);
638 }
639
640 int mem_ap_write_buf_noincr(struct adiv5_ap *ap,
641 const uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
642 {
643 return mem_ap_write(ap, buffer, size, count, address, false);
644 }
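/*
 * Usage sketch (illustrative): 'size' is the access width in bytes and
 * 'count' the number of accesses, so the buffer must hold size * count
 * bytes. 'ap' and 'addr' are assumed to be a configured MEM-AP and a
 * writable halfword-aligned address.
 *
 *	uint8_t data[128];
 *	int retval = mem_ap_write_buf(ap, data, 2, 64, addr);
 *	if (retval == ERROR_OK)
 *		retval = mem_ap_read_buf(ap, data, 2, 64, addr);
 */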
645
646 /*--------------------------------------------------------------------------*/
647
648
649 #define DAP_POWER_DOMAIN_TIMEOUT (10)
650
651 /*--------------------------------------------------------------------------*/
652
653 /**
654 * Invalidate cached DP select and cached TAR and CSW of all APs
655 */
656 void dap_invalidate_cache(struct adiv5_dap *dap)
657 {
658 dap->select = DP_SELECT_INVALID;
659 dap->last_read = NULL;
660
661 int i;
662 for (i = 0; i <= DP_APSEL_MAX; i++) {
663 /* force csw and tar write on the next mem-ap access */
664 dap->ap[i].tar_valid = false;
665 dap->ap[i].csw_value = 0;
666 }
667 }
668
669 /**
670 * Initialize a DAP. This sets up the power domains, prepares the DP
671 * for further use and activates overrun checking.
672 *
673 * @param dap The DAP being initialized.
674 */
675 int dap_dp_init(struct adiv5_dap *dap)
676 {
677 int retval;
678
679 LOG_DEBUG("%s", adiv5_dap_name(dap));
680
681 dap->do_reconnect = false;
682 dap_invalidate_cache(dap);
683
684 /*
685 * Early initialize dap->dp_ctrl_stat.
686 * In jtag mode only, if the following queue run (in dap_dp_poll_register)
687 * fails and sets the sticky error, it will trigger the clearing
688 * of the sticky. Without this initialization system and debug power
689 * would be disabled while clearing the sticky error bit.
690 */
691 dap->dp_ctrl_stat = CDBGPWRUPREQ | CSYSPWRUPREQ;
692
693 /*
694 * This write operation clears the sticky error bit in jtag mode only and
695 * is ignored in swd mode. It also powers-up system and debug domains in
696 * both jtag and swd modes, if not done before.
697 */
698 retval = dap_queue_dp_write(dap, DP_CTRL_STAT, dap->dp_ctrl_stat | SSTICKYERR);
699 if (retval != ERROR_OK)
700 return retval;
701
702 retval = dap_queue_dp_read(dap, DP_CTRL_STAT, NULL);
703 if (retval != ERROR_OK)
704 return retval;
705
706 retval = dap_queue_dp_write(dap, DP_CTRL_STAT, dap->dp_ctrl_stat);
707 if (retval != ERROR_OK)
708 return retval;
709
710 /* Check that we have debug power domains activated */
711 LOG_DEBUG("DAP: wait CDBGPWRUPACK");
712 retval = dap_dp_poll_register(dap, DP_CTRL_STAT,
713 CDBGPWRUPACK, CDBGPWRUPACK,
714 DAP_POWER_DOMAIN_TIMEOUT);
715 if (retval != ERROR_OK)
716 return retval;
717
718 if (!dap->ignore_syspwrupack) {
719 LOG_DEBUG("DAP: wait CSYSPWRUPACK");
720 retval = dap_dp_poll_register(dap, DP_CTRL_STAT,
721 CSYSPWRUPACK, CSYSPWRUPACK,
722 DAP_POWER_DOMAIN_TIMEOUT);
723 if (retval != ERROR_OK)
724 return retval;
725 }
726
727 retval = dap_queue_dp_read(dap, DP_CTRL_STAT, NULL);
728 if (retval != ERROR_OK)
729 return retval;
730
731 /* With debug power on we can activate OVERRUN checking */
732 dap->dp_ctrl_stat = CDBGPWRUPREQ | CSYSPWRUPREQ | CORUNDETECT;
733 retval = dap_queue_dp_write(dap, DP_CTRL_STAT, dap->dp_ctrl_stat);
734 if (retval != ERROR_OK)
735 return retval;
736 retval = dap_queue_dp_read(dap, DP_CTRL_STAT, NULL);
737 if (retval != ERROR_OK)
738 return retval;
739
740 retval = dap_run(dap);
741 if (retval != ERROR_OK)
742 return retval;
743
744 return retval;
745 }
746
747 /**
748 * Initialize a DAP or do reconnect if DAP is not accessible.
749 *
750 * @param dap The DAP being initialized.
751 */
752 int dap_dp_init_or_reconnect(struct adiv5_dap *dap)
753 {
754 LOG_DEBUG("%s", adiv5_dap_name(dap));
755
756 /*
757 * Early initialize dap->dp_ctrl_stat.
758 * In jtag mode only, if the following atomic reads fail and set the
759 * sticky error, it will trigger the clearing of the sticky. Without this
760 * initialization system and debug power would be disabled while clearing
761 * the sticky error bit.
762 */
763 dap->dp_ctrl_stat = CDBGPWRUPREQ | CSYSPWRUPREQ;
764
765 dap->do_reconnect = false;
766
767 dap_dp_read_atomic(dap, DP_CTRL_STAT, NULL);
768 if (dap->do_reconnect) {
769 /* dap connect calls dap_dp_init() after transport dependent initialization */
770 return dap->ops->connect(dap);
771 } else {
772 return dap_dp_init(dap);
773 }
774 }
775
776 /**
777 * Initialize a MEM-AP. This reads the AP's CFG register, probes whether
778 * packed transfers are supported, and caches the results for use by
779 * subsequent memory accesses through this AP.
780 *
781 * @param ap The MEM-AP being initialized.
782 */
783 int mem_ap_init(struct adiv5_ap *ap)
784 {
785 /* check that we support packed transfers */
786 uint32_t csw, cfg;
787 int retval;
788 struct adiv5_dap *dap = ap->dap;
789
790 /* Set ap->cfg_reg before calling mem_ap_setup_transfer(). */
791 /* mem_ap_setup_transfer() needs to know if the MEM_AP supports LPAE. */
792 retval = dap_queue_ap_read(ap, MEM_AP_REG_CFG(dap), &cfg);
793 if (retval != ERROR_OK)
794 return retval;
795
796 retval = dap_run(dap);
797 if (retval != ERROR_OK)
798 return retval;
799
800 ap->cfg_reg = cfg;
801 ap->tar_valid = false;
802 ap->csw_value = 0; /* force csw and tar write */
803 retval = mem_ap_setup_transfer(ap, CSW_8BIT | CSW_ADDRINC_PACKED, 0);
804 if (retval != ERROR_OK)
805 return retval;
806
807 retval = dap_queue_ap_read(ap, MEM_AP_REG_CSW(dap), &csw);
808 if (retval != ERROR_OK)
809 return retval;
810
811 retval = dap_run(dap);
812 if (retval != ERROR_OK)
813 return retval;
814
815 if (csw & CSW_ADDRINC_PACKED)
816 ap->packed_transfers = true;
817 else
818 ap->packed_transfers = false;
819
820 /* Packed transfers on TI BE-32 processors do not work correctly in
821 * many cases. */
822 if (dap->ti_be_32_quirks)
823 ap->packed_transfers = false;
824
825 LOG_DEBUG("MEM_AP Packed Transfers: %s",
826 ap->packed_transfers ? "enabled" : "disabled");
827
828 /* The ARM ADI spec leaves implementation-defined whether unaligned
829 * memory accesses work, only work partially, or cause a sticky error.
830 * On TI BE-32 processors, reads seem to return garbage in some bytes
831 * and unaligned writes seem to cause a sticky error.
832 * TODO: it would be nice to have a way to detect whether unaligned
833 * operations are supported on other processors. */
834 ap->unaligned_access_bad = dap->ti_be_32_quirks;
835
836 LOG_DEBUG("MEM_AP CFG: large data %d, long address %d, big-endian %d",
837 !!(cfg & MEM_AP_REG_CFG_LD), !!(cfg & MEM_AP_REG_CFG_LA), !!(cfg & MEM_AP_REG_CFG_BE));
838
839 return ERROR_OK;
840 }
841
842 /**
843 * Put the debug link into SWD mode, if the target supports it.
844 * The link's initial mode may be either JTAG (for example,
845 * with SWJ-DP after reset) or SWD.
846 *
847 * Note that targets using the JTAG-DP do not support SWD, and that
848 * some targets which could otherwise support it may have been
849 * configured to disable SWD signaling.
850 *
851 * @param dap The DAP used
852 * @return ERROR_OK or else a fault code.
853 */
854 int dap_to_swd(struct adiv5_dap *dap)
855 {
856 LOG_DEBUG("Enter SWD mode");
857
858 return dap_send_sequence(dap, JTAG_TO_SWD);
859 }
860
861 /**
862 * Put the debug link into JTAG mode, if the target supports it.
863 * The link's initial mode may be either SWD or JTAG.
864 *
865 * Note that targets implemented with SW-DP do not support JTAG, and
866 * that some targets which could otherwise support it may have been
867 * configured to disable JTAG signaling.
868 *
869 * @param dap The DAP used
870 * @return ERROR_OK or else a fault code.
871 */
872 int dap_to_jtag(struct adiv5_dap *dap)
873 {
874 LOG_DEBUG("Enter JTAG mode");
875
876 return dap_send_sequence(dap, SWD_TO_JTAG);
877 }
878
879 /* CID interpretation -- see ARM IHI 0029E table B2-7
880 * and ARM IHI 0031E table D1-2.
881 *
882 * From 2009/11/25 commit 21378f58b604:
883 * "OptimoDE DESS" is ARM's semicustom DSPish stuff.
884 * Let's keep it as is, for the time being
885 */
886 static const char *class_description[16] = {
887 [0x0] = "Generic verification component",
888 [0x1] = "ROM table",
889 [0x2] = "Reserved",
890 [0x3] = "Reserved",
891 [0x4] = "Reserved",
892 [0x5] = "Reserved",
893 [0x6] = "Reserved",
894 [0x7] = "Reserved",
895 [0x8] = "Reserved",
896 [0x9] = "CoreSight component",
897 [0xA] = "Reserved",
898 [0xB] = "Peripheral Test Block",
899 [0xC] = "Reserved",
900 [0xD] = "OptimoDE DESS", /* see above */
901 [0xE] = "Generic IP component",
902 [0xF] = "CoreLink, PrimeCell or System component",
903 };
904
905 #define ARCH_ID(architect, archid) ( \
906 (((architect) << ARM_CS_C9_DEVARCH_ARCHITECT_SHIFT) & ARM_CS_C9_DEVARCH_ARCHITECT_MASK) | \
907 (((archid) << ARM_CS_C9_DEVARCH_ARCHID_SHIFT) & ARM_CS_C9_DEVARCH_ARCHID_MASK) \
908 )
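/*
 * Example (illustrative): ARCH_ID(ARM_ID, 0x0A17) places the Arm JEP106
 * designer code in the ARCHITECT field and 0x0A17 in the ARCHID field,
 * matching the DEVARCH value of a Memory Access Port v2 component (see
 * DEVARCH_MEM_AP below).
 */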
909
910 static const struct {
911 uint32_t arch_id;
912 const char *description;
913 } class0x9_devarch[] = {
914 /* keep same unsorted order as in ARM IHI0029E */
915 { ARCH_ID(ARM_ID, 0x0A00), "RAS architecture" },
916 { ARCH_ID(ARM_ID, 0x1A01), "Instrumentation Trace Macrocell (ITM) architecture" },
917 { ARCH_ID(ARM_ID, 0x1A02), "DWT architecture" },
918 { ARCH_ID(ARM_ID, 0x1A03), "Flash Patch and Breakpoint unit (FPB) architecture" },
919 { ARCH_ID(ARM_ID, 0x2A04), "Processor debug architecture (ARMv8-M)" },
920 { ARCH_ID(ARM_ID, 0x6A05), "Processor debug architecture (ARMv8-R)" },
921 { ARCH_ID(ARM_ID, 0x0A10), "PC sample-based profiling" },
922 { ARCH_ID(ARM_ID, 0x4A13), "Embedded Trace Macrocell (ETM) architecture" },
923 { ARCH_ID(ARM_ID, 0x1A14), "Cross Trigger Interface (CTI) architecture" },
924 { ARCH_ID(ARM_ID, 0x6A15), "Processor debug architecture (v8.0-A)" },
925 { ARCH_ID(ARM_ID, 0x7A15), "Processor debug architecture (v8.1-A)" },
926 { ARCH_ID(ARM_ID, 0x8A15), "Processor debug architecture (v8.2-A)" },
927 { ARCH_ID(ARM_ID, 0x2A16), "Processor Performance Monitor (PMU) architecture" },
928 { ARCH_ID(ARM_ID, 0x0A17), "Memory Access Port v2 architecture" },
929 { ARCH_ID(ARM_ID, 0x0A27), "JTAG Access Port v2 architecture" },
930 { ARCH_ID(ARM_ID, 0x0A31), "Basic trace router" },
931 { ARCH_ID(ARM_ID, 0x0A37), "Power requestor" },
932 { ARCH_ID(ARM_ID, 0x0A47), "Unknown Access Port v2 architecture" },
933 { ARCH_ID(ARM_ID, 0x0A50), "HSSTP architecture" },
934 { ARCH_ID(ARM_ID, 0x0A63), "System Trace Macrocell (STM) architecture" },
935 { ARCH_ID(ARM_ID, 0x0A75), "CoreSight ELA architecture" },
936 { ARCH_ID(ARM_ID, 0x0AF7), "CoreSight ROM architecture" },
937 };
938
939 #define DEVARCH_ID_MASK (ARM_CS_C9_DEVARCH_ARCHITECT_MASK | ARM_CS_C9_DEVARCH_ARCHID_MASK)
940 #define DEVARCH_MEM_AP ARCH_ID(ARM_ID, 0x0A17)
941 #define DEVARCH_ROM_C_0X9 ARCH_ID(ARM_ID, 0x0AF7)
942 #define DEVARCH_UNKNOWN_V2 ARCH_ID(ARM_ID, 0x0A47)
943
944 static const char *class0x9_devarch_description(uint32_t devarch)
945 {
946 if (!(devarch & ARM_CS_C9_DEVARCH_PRESENT))
947 return "not present";
948
949 for (unsigned int i = 0; i < ARRAY_SIZE(class0x9_devarch); i++)
950 if ((devarch & DEVARCH_ID_MASK) == class0x9_devarch[i].arch_id)
951 return class0x9_devarch[i].description;
952
953 return "unknown";
954 }
955
956 static const struct {
957 enum ap_type type;
958 const char *description;
959 } ap_types[] = {
960 { AP_TYPE_JTAG_AP, "JTAG-AP" },
961 { AP_TYPE_COM_AP, "COM-AP" },
962 { AP_TYPE_AHB3_AP, "MEM-AP AHB3" },
963 { AP_TYPE_APB_AP, "MEM-AP APB2 or APB3" },
964 { AP_TYPE_AXI_AP, "MEM-AP AXI3 or AXI4" },
965 { AP_TYPE_AHB5_AP, "MEM-AP AHB5" },
966 { AP_TYPE_APB4_AP, "MEM-AP APB4" },
967 { AP_TYPE_AXI5_AP, "MEM-AP AXI5" },
968 { AP_TYPE_AHB5H_AP, "MEM-AP AHB5 with enhanced HPROT" },
969 };
970
971 static const char *ap_type_to_description(enum ap_type type)
972 {
973 for (unsigned int i = 0; i < ARRAY_SIZE(ap_types); i++)
974 if (type == ap_types[i].type)
975 return ap_types[i].description;
976
977 return "Unknown";
978 }
979
980 bool is_ap_num_valid(struct adiv5_dap *dap, uint64_t ap_num)
981 {
982 if (!dap)
983 return false;
984
985 /* no autodetection, for now, so uninitialized is equivalent to ADIv5 for
986 * backward compatibility */
987 if (!is_adiv6(dap)) {
988 if (ap_num > DP_APSEL_MAX)
989 return false;
990 return true;
991 }
992
993 if (is_adiv6(dap)) {
994 if (ap_num & 0x0fffULL)
995 return false;
996 if (dap->asize != 0)
997 if (ap_num & ((~0ULL) << dap->asize))
998 return false;
999 return true;
1000 }
1001
1002 return false;
1003 }
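/*
 * Example (illustrative): on an ADIv6 DAP with dap->asize = 20, AP number
 * 0x1000 is valid (4 KiB aligned and within 20 address bits), while 0x1004
 * is rejected for misalignment and 0x100000 for exceeding the address size.
 */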
1004
1005 /*
1006 * This function checks the ID for each access port to find the requested Access Port type
1007 * It also calls dap_get_ap() to increment the AP refcount
1008 */
1009 int dap_find_get_ap(struct adiv5_dap *dap, enum ap_type type_to_find, struct adiv5_ap **ap_out)
1010 {
1011 if (is_adiv6(dap)) {
1012 /* TODO: scan the ROM table and detect the AP available */
1013 LOG_DEBUG("On ADIv6 we cannot scan all the possible AP");
1014 return ERROR_FAIL;
1015 }
1016
1017 /* Maximum AP number is 255 since the APSEL field in the SELECT register is 8 bits */
1018 for (unsigned int ap_num = 0; ap_num <= DP_APSEL_MAX; ap_num++) {
1019 struct adiv5_ap *ap = dap_get_ap(dap, ap_num);
1020 if (!ap)
1021 continue;
1022
1023 /* read the IDR register of the Access Port */
1024 uint32_t id_val = 0;
1025
1026 int retval = dap_queue_ap_read(ap, AP_REG_IDR(dap), &id_val);
1027 if (retval != ERROR_OK) {
1028 dap_put_ap(ap);
1029 return retval;
1030 }
1031
1032 retval = dap_run(dap);
1033
1034 /* Reading a register of a non-existent AP should not cause an error,
1035 * but just to be sure, try to continue searching if an error does happen.
1036 */
1037 if (retval == ERROR_OK && (id_val & AP_TYPE_MASK) == type_to_find) {
1038 LOG_DEBUG("Found %s at AP index: %d (IDR=0x%08" PRIX32 ")",
1039 ap_type_to_description(type_to_find),
1040 ap_num, id_val);
1041
1042 *ap_out = ap;
1043 return ERROR_OK;
1044 }
1045 dap_put_ap(ap);
1046 }
1047
1048 LOG_DEBUG("No %s found", ap_type_to_description(type_to_find));
1049 return ERROR_FAIL;
1050 }
1051
1052 static inline bool is_ap_in_use(struct adiv5_ap *ap)
1053 {
1054 return ap->refcount > 0 || ap->config_ap_never_release;
1055 }
1056
1057 static struct adiv5_ap *_dap_get_ap(struct adiv5_dap *dap, uint64_t ap_num)
1058 {
1059 if (!is_ap_num_valid(dap, ap_num)) {
1060 LOG_ERROR("Invalid AP#0x%" PRIx64, ap_num);
1061 return NULL;
1062 }
1063 if (is_adiv6(dap)) {
1064 for (unsigned int i = 0; i <= DP_APSEL_MAX; i++) {
1065 struct adiv5_ap *ap = &dap->ap[i];
1066 if (is_ap_in_use(ap) && ap->ap_num == ap_num) {
1067 ++ap->refcount;
1068 return ap;
1069 }
1070 }
1071 for (unsigned int i = 0; i <= DP_APSEL_MAX; i++) {
1072 struct adiv5_ap *ap = &dap->ap[i];
1073 if (!is_ap_in_use(ap)) {
1074 ap->ap_num = ap_num;
1075 ++ap->refcount;
1076 return ap;
1077 }
1078 }
1079 LOG_ERROR("No more AP available!");
1080 return NULL;
1081 }
1082
1083 /* ADIv5 */
1084 struct adiv5_ap *ap = &dap->ap[ap_num];
1085 ap->ap_num = ap_num;
1086 ++ap->refcount;
1087 return ap;
1088 }
1089
1090 /* Return AP with specified ap_num. Increment AP refcount */
1091 struct adiv5_ap *dap_get_ap(struct adiv5_dap *dap, uint64_t ap_num)
1092 {
1093 struct adiv5_ap *ap = _dap_get_ap(dap, ap_num);
1094 if (ap)
1095 LOG_DEBUG("refcount AP#0x%" PRIx64 " get %u", ap_num, ap->refcount);
1096 return ap;
1097 }
1098
1099 /* Return AP with specified ap_num. Increment AP refcount and keep it non-zero */
1100 struct adiv5_ap *dap_get_config_ap(struct adiv5_dap *dap, uint64_t ap_num)
1101 {
1102 struct adiv5_ap *ap = _dap_get_ap(dap, ap_num);
1103 if (ap) {
1104 ap->config_ap_never_release = true;
1105 LOG_DEBUG("refcount AP#0x%" PRIx64 " get_config %u", ap_num, ap->refcount);
1106 }
1107 return ap;
1108 }
1109
1110 /* Decrement AP refcount and release the AP when refcount reaches zero */
1111 int dap_put_ap(struct adiv5_ap *ap)
1112 {
1113 if (ap->refcount == 0) {
1114 LOG_ERROR("BUG: refcount AP#0x%" PRIx64 " put underflow", ap->ap_num);
1115 return ERROR_FAIL;
1116 }
1117
1118 --ap->refcount;
1119
1120 LOG_DEBUG("refcount AP#0x%" PRIx64 " put %u", ap->ap_num, ap->refcount);
1121 if (!is_ap_in_use(ap)) {
1122 /* defaults from dap_instance_init() */
1123 ap->ap_num = DP_APSEL_INVALID;
1124 ap->memaccess_tck = 255;
1125 ap->tar_autoincr_block = (1 << 10);
1126 ap->csw_default = CSW_AHB_DEFAULT;
1127 ap->cfg_reg = MEM_AP_REG_CFG_INVALID;
1128 }
1129 return ERROR_OK;
1130 }
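/*
 * Typical usage sketch (illustrative): acquire an AP reference, use it,
 * then release it so the slot can be reused (relevant on ADIv6, where AP
 * slots are allocated on demand). 'dap', 'ap_num', 'addr' and 'value' are
 * assumed to be provided by the caller.
 *
 *	struct adiv5_ap *ap = dap_get_ap(dap, ap_num);
 *	if (!ap)
 *		return ERROR_FAIL;
 *	int retval = mem_ap_read_atomic_u32(ap, addr, &value);
 *	dap_put_ap(ap);
 *	return retval;
 */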
1131
1132 static int dap_get_debugbase(struct adiv5_ap *ap,
1133 target_addr_t *dbgbase, uint32_t *apid)
1134 {
1135 struct adiv5_dap *dap = ap->dap;
1136 int retval;
1137 uint32_t baseptr_upper, baseptr_lower;
1138
1139 if (ap->cfg_reg == MEM_AP_REG_CFG_INVALID) {
1140 retval = dap_queue_ap_read(ap, MEM_AP_REG_CFG(dap), &ap->cfg_reg);
1141 if (retval != ERROR_OK)
1142 return retval;
1143 }
1144 retval = dap_queue_ap_read(ap, MEM_AP_REG_BASE(dap), &baseptr_lower);
1145 if (retval != ERROR_OK)
1146 return retval;
1147 retval = dap_queue_ap_read(ap, AP_REG_IDR(dap), apid);
1148 if (retval != ERROR_OK)
1149 return retval;
1150 /* MEM_AP_REG_BASE64 is defined as 'RES0'; can be read and then ignored on 32-bit APs */
1151 if (ap->cfg_reg == MEM_AP_REG_CFG_INVALID || is_64bit_ap(ap)) {
1152 retval = dap_queue_ap_read(ap, MEM_AP_REG_BASE64(dap), &baseptr_upper);
1153 if (retval != ERROR_OK)
1154 return retval;
1155 }
1156
1157 retval = dap_run(dap);
1158 if (retval != ERROR_OK)
1159 return retval;
1160
1161 if (!is_64bit_ap(ap))
1162 baseptr_upper = 0;
1163 *dbgbase = (((target_addr_t)baseptr_upper) << 32) | baseptr_lower;
1164
1165 return ERROR_OK;
1166 }
1167
1168 int adiv6_dap_read_baseptr(struct command_invocation *cmd, struct adiv5_dap *dap, uint64_t *baseptr)
1169 {
1170 uint32_t baseptr_lower, baseptr_upper = 0;
1171 int retval;
1172
1173 if (dap->asize > 32) {
1174 retval = dap_queue_dp_read(dap, DP_BASEPTR1, &baseptr_upper);
1175 if (retval != ERROR_OK)
1176 return retval;
1177 }
1178
1179 retval = dap_dp_read_atomic(dap, DP_BASEPTR0, &baseptr_lower);
1180 if (retval != ERROR_OK)
1181 return retval;
1182
1183 if ((baseptr_lower & DP_BASEPTR0_VALID) != DP_BASEPTR0_VALID) {
1184 command_print(cmd, "System root table not present");
1185 return ERROR_FAIL;
1186 }
1187
1188 baseptr_lower &= ~0x0fff;
1189 *baseptr = (((uint64_t)baseptr_upper) << 32) | baseptr_lower;
1190
1191 return ERROR_OK;
1192 }
1193
1194 /**
1195 * Method to access the CoreSight component.
1196 * On ADIv5, CoreSight components are on the bus behind a MEM-AP.
1197 * On ADIv6, CoreSight components can either be on the bus behind a MEM-AP
1198 * or directly in the AP.
1199 */
1200 enum coresight_access_mode {
1201 CS_ACCESS_AP,
1202 CS_ACCESS_MEM_AP,
1203 };
1204
1205 /** Holds registers and coordinates of a CoreSight component */
1206 struct cs_component_vals {
1207 struct adiv5_ap *ap;
1208 target_addr_t component_base;
1209 uint64_t pid;
1210 uint32_t cid;
1211 uint32_t devarch;
1212 uint32_t devid;
1213 uint32_t devtype_memtype;
1214 enum coresight_access_mode mode;
1215 };
1216
1217 /**
1218 * Helper to read CoreSight component's registers, either on the bus
1219 * behind a MEM-AP or directly in the AP.
1220 *
1221 * @param mode Method to access the component (AP or MEM-AP).
1222 * @param ap Pointer to AP containing the component.
1223 * @param component_base On MEM-AP access method, base address of the component.
1224 * @param reg Offset of the component's register to read.
1225 * @param value Pointer to store the read value.
1226 *
1227 * @return ERROR_OK on success, else a fault code.
1228 */
1229 static int dap_queue_read_reg(enum coresight_access_mode mode, struct adiv5_ap *ap,
1230 uint64_t component_base, unsigned int reg, uint32_t *value)
1231 {
1232 if (mode == CS_ACCESS_AP)
1233 return dap_queue_ap_read(ap, reg, value);
1234
1235 /* mode == CS_ACCESS_MEM_AP */
1236 return mem_ap_read_u32(ap, component_base + reg, value);
1237 }
1238
1239 /**
1240 * Read the CoreSight registers needed during ROM Table Parsing (RTP).
1241 *
1242 * @param mode Method to access the component (AP or MEM-AP).
1243 * @param ap Pointer to AP containing the component.
1244 * @param component_base On MEM-AP access method, base address of the component.
1245 * @param v Pointer to the struct holding the value of registers.
1246 *
1247 * @return ERROR_OK on success, else a fault code.
1248 */
1249 static int rtp_read_cs_regs(enum coresight_access_mode mode, struct adiv5_ap *ap,
1250 target_addr_t component_base, struct cs_component_vals *v)
1251 {
1252 assert(IS_ALIGNED(component_base, ARM_CS_ALIGN));
1253 assert(ap && v);
1254
1255 uint32_t cid0, cid1, cid2, cid3;
1256 uint32_t pid0, pid1, pid2, pid3, pid4;
1257 int retval = ERROR_OK;
1258
1259 v->ap = ap;
1260 v->component_base = component_base;
1261 v->mode = mode;
1262
1263 /* sort by offset to gain speed */
1264
1265 /*
1266 * Registers DEVARCH, DEVID and DEVTYPE are valid on Class 0x9 devices
1267 * only, but are at offset above 0xf00, so can be read on any device
1268 * without triggering an error. Read them now for possible use on Class 0x9.
1269 */
1270 if (retval == ERROR_OK)
1271 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_C9_DEVARCH, &v->devarch);
1272
1273 if (retval == ERROR_OK)
1274 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_C9_DEVID, &v->devid);
1275
1276 /* Same address as ARM_CS_C1_MEMTYPE */
1277 if (retval == ERROR_OK)
1278 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_C9_DEVTYPE, &v->devtype_memtype);
1279
1280 if (retval == ERROR_OK)
1281 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_PIDR4, &pid4);
1282
1283 if (retval == ERROR_OK)
1284 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_PIDR0, &pid0);
1285 if (retval == ERROR_OK)
1286 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_PIDR1, &pid1);
1287 if (retval == ERROR_OK)
1288 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_PIDR2, &pid2);
1289 if (retval == ERROR_OK)
1290 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_PIDR3, &pid3);
1291
1292 if (retval == ERROR_OK)
1293 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_CIDR0, &cid0);
1294 if (retval == ERROR_OK)
1295 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_CIDR1, &cid1);
1296 if (retval == ERROR_OK)
1297 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_CIDR2, &cid2);
1298 if (retval == ERROR_OK)
1299 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_CIDR3, &cid3);
1300
1301 if (retval == ERROR_OK)
1302 retval = dap_run(ap->dap);
1303 if (retval != ERROR_OK) {
1304 LOG_DEBUG("Failed to read CoreSight registers");
1305 return retval;
1306 }
1307
1308 v->cid = (cid3 & 0xff) << 24
1309 | (cid2 & 0xff) << 16
1310 | (cid1 & 0xff) << 8
1311 | (cid0 & 0xff);
1312 v->pid = (uint64_t)(pid4 & 0xff) << 32
1313 | (pid3 & 0xff) << 24
1314 | (pid2 & 0xff) << 16
1315 | (pid1 & 0xff) << 8
1316 | (pid0 & 0xff);
1317
1318 return ERROR_OK;
1319 }
1320
1321 /* Part number interpretations are from Cortex
1322 * core specs, the CoreSight components TRM
1323 * (ARM DDI 0314H), CoreSight System Design
1324 * Guide (ARM DGI 0012D) and ETM specs; also
1325 * from chip observation (e.g. TI SDTI).
1326 */
1327
1328 static const struct dap_part_nums {
1329 uint16_t designer_id;
1330 uint16_t part_num;
1331 const char *type;
1332 const char *full;
1333 } dap_part_nums[] = {
1334 { ARM_ID, 0x000, "Cortex-M3 SCS", "(System Control Space)", },
1335 { ARM_ID, 0x001, "Cortex-M3 ITM", "(Instrumentation Trace Module)", },
1336 { ARM_ID, 0x002, "Cortex-M3 DWT", "(Data Watchpoint and Trace)", },
1337 { ARM_ID, 0x003, "Cortex-M3 FPB", "(Flash Patch and Breakpoint)", },
1338 { ARM_ID, 0x008, "Cortex-M0 SCS", "(System Control Space)", },
1339 { ARM_ID, 0x00a, "Cortex-M0 DWT", "(Data Watchpoint and Trace)", },
1340 { ARM_ID, 0x00b, "Cortex-M0 BPU", "(Breakpoint Unit)", },
1341 { ARM_ID, 0x00c, "Cortex-M4 SCS", "(System Control Space)", },
1342 { ARM_ID, 0x00d, "CoreSight ETM11", "(Embedded Trace)", },
1343 { ARM_ID, 0x00e, "Cortex-M7 FPB", "(Flash Patch and Breakpoint)", },
1344 { ARM_ID, 0x193, "SoC-600 TSGEN", "(Timestamp Generator)", },
1345 { ARM_ID, 0x470, "Cortex-M1 ROM", "(ROM Table)", },
1346 { ARM_ID, 0x471, "Cortex-M0 ROM", "(ROM Table)", },
1347 { ARM_ID, 0x490, "Cortex-A15 GIC", "(Generic Interrupt Controller)", },
1348 { ARM_ID, 0x492, "Cortex-R52 GICD", "(Distributor)", },
1349 { ARM_ID, 0x493, "Cortex-R52 GICR", "(Redistributor)", },
1350 { ARM_ID, 0x4a1, "Cortex-A53 ROM", "(v8 Memory Map ROM Table)", },
1351 { ARM_ID, 0x4a2, "Cortex-A57 ROM", "(ROM Table)", },
1352 { ARM_ID, 0x4a3, "Cortex-A53 ROM", "(v7 Memory Map ROM Table)", },
1353 { ARM_ID, 0x4a4, "Cortex-A72 ROM", "(ROM Table)", },
1354 { ARM_ID, 0x4a9, "Cortex-A9 ROM", "(ROM Table)", },
1355 { ARM_ID, 0x4aa, "Cortex-A35 ROM", "(v8 Memory Map ROM Table)", },
1356 { ARM_ID, 0x4af, "Cortex-A15 ROM", "(ROM Table)", },
1357 { ARM_ID, 0x4b5, "Cortex-R5 ROM", "(ROM Table)", },
1358 { ARM_ID, 0x4b8, "Cortex-R52 ROM", "(ROM Table)", },
1359 { ARM_ID, 0x4c0, "Cortex-M0+ ROM", "(ROM Table)", },
1360 { ARM_ID, 0x4c3, "Cortex-M3 ROM", "(ROM Table)", },
1361 { ARM_ID, 0x4c4, "Cortex-M4 ROM", "(ROM Table)", },
1362 { ARM_ID, 0x4c7, "Cortex-M7 PPB ROM", "(Private Peripheral Bus ROM Table)", },
1363 { ARM_ID, 0x4c8, "Cortex-M7 ROM", "(ROM Table)", },
1364 { ARM_ID, 0x4e0, "Cortex-A35 ROM", "(v7 Memory Map ROM Table)", },
1365 { ARM_ID, 0x4e4, "Cortex-A76 ROM", "(ROM Table)", },
1366 { ARM_ID, 0x906, "CoreSight CTI", "(Cross Trigger)", },
1367 { ARM_ID, 0x907, "CoreSight ETB", "(Trace Buffer)", },
1368 { ARM_ID, 0x908, "CoreSight CSTF", "(Trace Funnel)", },
1369 { ARM_ID, 0x909, "CoreSight ATBR", "(Advanced Trace Bus Replicator)", },
1370 { ARM_ID, 0x910, "CoreSight ETM9", "(Embedded Trace)", },
1371 { ARM_ID, 0x912, "CoreSight TPIU", "(Trace Port Interface Unit)", },
1372 { ARM_ID, 0x913, "CoreSight ITM", "(Instrumentation Trace Macrocell)", },
1373 { ARM_ID, 0x914, "CoreSight SWO", "(Single Wire Output)", },
1374 { ARM_ID, 0x917, "CoreSight HTM", "(AHB Trace Macrocell)", },
1375 { ARM_ID, 0x920, "CoreSight ETM11", "(Embedded Trace)", },
1376 { ARM_ID, 0x921, "Cortex-A8 ETM", "(Embedded Trace)", },
1377 { ARM_ID, 0x922, "Cortex-A8 CTI", "(Cross Trigger)", },
1378 { ARM_ID, 0x923, "Cortex-M3 TPIU", "(Trace Port Interface Unit)", },
1379 { ARM_ID, 0x924, "Cortex-M3 ETM", "(Embedded Trace)", },
1380 { ARM_ID, 0x925, "Cortex-M4 ETM", "(Embedded Trace)", },
1381 { ARM_ID, 0x930, "Cortex-R4 ETM", "(Embedded Trace)", },
1382 { ARM_ID, 0x931, "Cortex-R5 ETM", "(Embedded Trace)", },
1383 { ARM_ID, 0x932, "CoreSight MTB-M0+", "(Micro Trace Buffer)", },
1384 { ARM_ID, 0x941, "CoreSight TPIU-Lite", "(Trace Port Interface Unit)", },
1385 { ARM_ID, 0x950, "Cortex-A9 PTM", "(Program Trace Macrocell)", },
1386 { ARM_ID, 0x955, "Cortex-A5 ETM", "(Embedded Trace)", },
1387 { ARM_ID, 0x95a, "Cortex-A72 ETM", "(Embedded Trace)", },
1388 { ARM_ID, 0x95b, "Cortex-A17 PTM", "(Program Trace Macrocell)", },
1389 { ARM_ID, 0x95d, "Cortex-A53 ETM", "(Embedded Trace)", },
1390 { ARM_ID, 0x95e, "Cortex-A57 ETM", "(Embedded Trace)", },
1391 { ARM_ID, 0x95f, "Cortex-A15 PTM", "(Program Trace Macrocell)", },
1392 { ARM_ID, 0x961, "CoreSight TMC", "(Trace Memory Controller)", },
1393 { ARM_ID, 0x962, "CoreSight STM", "(System Trace Macrocell)", },
1394 { ARM_ID, 0x975, "Cortex-M7 ETM", "(Embedded Trace)", },
1395 { ARM_ID, 0x9a0, "CoreSight PMU", "(Performance Monitoring Unit)", },
1396 { ARM_ID, 0x9a1, "Cortex-M4 TPIU", "(Trace Port Interface Unit)", },
1397 { ARM_ID, 0x9a4, "CoreSight GPR", "(Granular Power Requester)", },
1398 { ARM_ID, 0x9a5, "Cortex-A5 PMU", "(Performance Monitor Unit)", },
1399 { ARM_ID, 0x9a7, "Cortex-A7 PMU", "(Performance Monitor Unit)", },
1400 { ARM_ID, 0x9a8, "Cortex-A53 CTI", "(Cross Trigger)", },
1401 { ARM_ID, 0x9a9, "Cortex-M7 TPIU", "(Trace Port Interface Unit)", },
1402 { ARM_ID, 0x9ae, "Cortex-A17 PMU", "(Performance Monitor Unit)", },
1403 { ARM_ID, 0x9af, "Cortex-A15 PMU", "(Performance Monitor Unit)", },
1404 { ARM_ID, 0x9b6, "Cortex-R52 PMU/CTI/ETM", "(Performance Monitor Unit/Cross Trigger/ETM)", },
1405 { ARM_ID, 0x9b7, "Cortex-R7 PMU", "(Performance Monitor Unit)", },
1406 { ARM_ID, 0x9d3, "Cortex-A53 PMU", "(Performance Monitor Unit)", },
1407 { ARM_ID, 0x9d7, "Cortex-A57 PMU", "(Performance Monitor Unit)", },
1408 { ARM_ID, 0x9d8, "Cortex-A72 PMU", "(Performance Monitor Unit)", },
1409 { ARM_ID, 0x9da, "Cortex-A35 PMU/CTI/ETM", "(Performance Monitor Unit/Cross Trigger/ETM)", },
1410 { ARM_ID, 0x9e2, "SoC-600 APB-AP", "(APB4 Memory Access Port)", },
1411 { ARM_ID, 0x9e3, "SoC-600 AHB-AP", "(AHB5 Memory Access Port)", },
1412 { ARM_ID, 0x9e4, "SoC-600 AXI-AP", "(AXI Memory Access Port)", },
1413 { ARM_ID, 0x9e5, "SoC-600 APv1 Adapter", "(Access Port v1 Adapter)", },
1414 { ARM_ID, 0x9e6, "SoC-600 JTAG-AP", "(JTAG Access Port)", },
1415 { ARM_ID, 0x9e7, "SoC-600 TPIU", "(Trace Port Interface Unit)", },
1416 { ARM_ID, 0x9e8, "SoC-600 TMC ETR/ETS", "(Embedded Trace Router/Streamer)", },
1417 { ARM_ID, 0x9e9, "SoC-600 TMC ETB", "(Embedded Trace Buffer)", },
1418 { ARM_ID, 0x9ea, "SoC-600 TMC ETF", "(Embedded Trace FIFO)", },
1419 { ARM_ID, 0x9eb, "SoC-600 ATB Funnel", "(Trace Funnel)", },
1420 { ARM_ID, 0x9ec, "SoC-600 ATB Replicator", "(Trace Replicator)", },
1421 { ARM_ID, 0x9ed, "SoC-600 CTI", "(Cross Trigger)", },
1422 { ARM_ID, 0x9ee, "SoC-600 CATU", "(Address Translation Unit)", },
1423 { ARM_ID, 0xc05, "Cortex-A5 Debug", "(Debug Unit)", },
1424 { ARM_ID, 0xc07, "Cortex-A7 Debug", "(Debug Unit)", },
1425 { ARM_ID, 0xc08, "Cortex-A8 Debug", "(Debug Unit)", },
1426 { ARM_ID, 0xc09, "Cortex-A9 Debug", "(Debug Unit)", },
1427 { ARM_ID, 0xc0e, "Cortex-A17 Debug", "(Debug Unit)", },
1428 { ARM_ID, 0xc0f, "Cortex-A15 Debug", "(Debug Unit)", },
1429 { ARM_ID, 0xc14, "Cortex-R4 Debug", "(Debug Unit)", },
1430 { ARM_ID, 0xc15, "Cortex-R5 Debug", "(Debug Unit)", },
1431 { ARM_ID, 0xc17, "Cortex-R7 Debug", "(Debug Unit)", },
1432 { ARM_ID, 0xd03, "Cortex-A53 Debug", "(Debug Unit)", },
1433 { ARM_ID, 0xd04, "Cortex-A35 Debug", "(Debug Unit)", },
1434 { ARM_ID, 0xd07, "Cortex-A57 Debug", "(Debug Unit)", },
1435 { ARM_ID, 0xd08, "Cortex-A72 Debug", "(Debug Unit)", },
1436 { ARM_ID, 0xd0b, "Cortex-A76 Debug", "(Debug Unit)", },
1437 { ARM_ID, 0xd0c, "Neoverse N1", "(Debug Unit)", },
1438 { ARM_ID, 0xd13, "Cortex-R52 Debug", "(Debug Unit)", },
1439 { ARM_ID, 0xd49, "Neoverse N2", "(Debug Unit)", },
1440 { 0x017, 0x120, "TI SDTI", "(System Debug Trace Interface)", }, /* from OMAP3 memmap */
1441 { 0x017, 0x343, "TI DAPCTL", "", }, /* from OMAP3 memmap */
1442 { 0x017, 0x9af, "MSP432 ROM", "(ROM Table)" },
1443 { 0x01f, 0xcd0, "Atmel CPU with DSU", "(CPU)" },
1444 { 0x041, 0x1db, "XMC4500 ROM", "(ROM Table)" },
1445 { 0x041, 0x1df, "XMC4700/4800 ROM", "(ROM Table)" },
1446 { 0x041, 0x1ed, "XMC1000 ROM", "(ROM Table)" },
1447 { 0x065, 0x000, "SHARC+/Blackfin+", "", },
1448 { 0x070, 0x440, "Qualcomm QDSS Component v1", "(Qualcomm Designed CoreSight Component v1)", },
1449 { 0x0bf, 0x100, "Brahma-B53 Debug", "(Debug Unit)", },
1450 { 0x0bf, 0x9d3, "Brahma-B53 PMU", "(Performance Monitor Unit)", },
1451 { 0x0bf, 0x4a1, "Brahma-B53 ROM", "(ROM Table)", },
1452 { 0x0bf, 0x721, "Brahma-B53 ROM", "(ROM Table)", },
1453 { 0x1eb, 0x181, "Tegra 186 ROM", "(ROM Table)", },
1454 { 0x1eb, 0x202, "Denver ETM", "(Denver Embedded Trace)", },
1455 { 0x1eb, 0x211, "Tegra 210 ROM", "(ROM Table)", },
1456 { 0x1eb, 0x302, "Denver Debug", "(Debug Unit)", },
1457 { 0x1eb, 0x402, "Denver PMU", "(Performance Monitor Unit)", },
1458 };
1459
1460 static const struct dap_part_nums *pidr_to_part_num(unsigned int designer_id, unsigned int part_num)
1461 {
1462 static const struct dap_part_nums unknown = {
1463 .type = "Unrecognized",
1464 .full = "",
1465 };
1466
1467 for (unsigned int i = 0; i < ARRAY_SIZE(dap_part_nums); i++)
1468 if (dap_part_nums[i].designer_id == designer_id && dap_part_nums[i].part_num == part_num)
1469 return &dap_part_nums[i];
1470
1471 return &unknown;
1472 }
1473
1474 static int dap_devtype_display(struct command_invocation *cmd, uint32_t devtype)
1475 {
1476 const char *major = "Reserved", *subtype = "Reserved";
1477 const unsigned int minor = (devtype & ARM_CS_C9_DEVTYPE_SUB_MASK) >> ARM_CS_C9_DEVTYPE_SUB_SHIFT;
1478 const unsigned int devtype_major = (devtype & ARM_CS_C9_DEVTYPE_MAJOR_MASK) >> ARM_CS_C9_DEVTYPE_MAJOR_SHIFT;
1479 switch (devtype_major) {
1480 case 0:
1481 major = "Miscellaneous";
1482 switch (minor) {
1483 case 0:
1484 subtype = "other";
1485 break;
1486 case 4:
1487 subtype = "Validation component";
1488 break;
1489 }
1490 break;
1491 case 1:
1492 major = "Trace Sink";
1493 switch (minor) {
1494 case 0:
1495 subtype = "other";
1496 break;
1497 case 1:
1498 subtype = "Port";
1499 break;
1500 case 2:
1501 subtype = "Buffer";
1502 break;
1503 case 3:
1504 subtype = "Router";
1505 break;
1506 }
1507 break;
1508 case 2:
1509 major = "Trace Link";
1510 switch (minor) {
1511 case 0:
1512 subtype = "other";
1513 break;
1514 case 1:
1515 subtype = "Funnel, router";
1516 break;
1517 case 2:
1518 subtype = "Filter";
1519 break;
1520 case 3:
1521 subtype = "FIFO, buffer";
1522 break;
1523 }
1524 break;
1525 case 3:
1526 major = "Trace Source";
1527 switch (minor) {
1528 case 0:
1529 subtype = "other";
1530 break;
1531 case 1:
1532 subtype = "Processor";
1533 break;
1534 case 2:
1535 subtype = "DSP";
1536 break;
1537 case 3:
1538 subtype = "Engine/Coprocessor";
1539 break;
1540 case 4:
1541 subtype = "Bus";
1542 break;
1543 case 6:
1544 subtype = "Software";
1545 break;
1546 }
1547 break;
1548 case 4:
1549 major = "Debug Control";
1550 switch (minor) {
1551 case 0:
1552 subtype = "other";
1553 break;
1554 case 1:
1555 subtype = "Trigger Matrix";
1556 break;
1557 case 2:
1558 subtype = "Debug Auth";
1559 break;
1560 case 3:
1561 subtype = "Power Requestor";
1562 break;
1563 }
1564 break;
1565 case 5:
1566 major = "Debug Logic";
1567 switch (minor) {
1568 case 0:
1569 subtype = "other";
1570 break;
1571 case 1:
1572 subtype = "Processor";
1573 break;
1574 case 2:
1575 subtype = "DSP";
1576 break;
1577 case 3:
1578 subtype = "Engine/Coprocessor";
1579 break;
1580 case 4:
1581 subtype = "Bus";
1582 break;
1583 case 5:
1584 subtype = "Memory";
1585 break;
1586 }
1587 break;
1588 case 6:
1589 major = "Performance Monitor";
1590 switch (minor) {
1591 case 0:
1592 subtype = "other";
1593 break;
1594 case 1:
1595 subtype = "Processor";
1596 break;
1597 case 2:
1598 subtype = "DSP";
1599 break;
1600 case 3:
1601 subtype = "Engine/Coprocessor";
1602 break;
1603 case 4:
1604 subtype = "Bus";
1605 break;
1606 case 5:
1607 subtype = "Memory";
1608 break;
1609 }
1610 break;
1611 }
1612 command_print(cmd, "\t\tType is 0x%02x, %s, %s",
1613 devtype & ARM_CS_C9_DEVTYPE_MASK,
1614 major, subtype);
1615 return ERROR_OK;
1616 }
1617
1618 /**
1619 * Actions/operations to be executed while parsing ROM tables.
1620 */
1621 struct rtp_ops {
1622 /**
1623 * Executed at the start of a new AP, typically to print the AP header.
1624 * @param ap Pointer to AP.
1625 * @param depth The current depth level of ROM table.
1626 * @param priv Pointer to private data.
1627 * @return ERROR_OK on success, else a fault code.
1628 */
1629 int (*ap_header)(struct adiv5_ap *ap, int depth, void *priv);
1630 /**
1631 * Executed at the start of a new MEM-AP, typically to print the MEM-AP header.
1632 * @param retval Error encountered while reading AP.
1633 * @param ap Pointer to AP.
1634 * @param dbgbase Value of MEM-AP Debug Base Address register.
1635 * @param apid Value of MEM-AP IDR Identification Register.
1636 * @param depth The current depth level of ROM table.
1637 * @param priv Pointer to private data.
1638 * @return ERROR_OK on success, else a fault code.
1639 */
1640 int (*mem_ap_header)(int retval, struct adiv5_ap *ap, uint64_t dbgbase,
1641 uint32_t apid, int depth, void *priv);
1642 /**
1643 * Executed when a CoreSight component is parsed, typically to print
1644 * information on the component.
1645 * @param retval Error encountered while reading component's registers.
1646 * @param v Pointer to a container of the component's registers.
1647 * @param depth The current depth level of ROM table.
1648 * @param priv Pointer to private data.
1649 * @return ERROR_OK on success, else a fault code.
1650 */
1651 int (*cs_component)(int retval, struct cs_component_vals *v, int depth, void *priv);
1652 /**
1653 * Executed for each entry of a ROM table, typically to print the entry
1654 * and information about validity or end-of-table mark.
1655 * @param retval Error encountered while reading the ROM table entry.
1656 * @param depth The current depth level of ROM table.
1657 * @param offset The offset of the entry in the ROM table.
1658 * @param romentry The value of the ROM table entry.
1659 * @param priv Pointer to private data.
1660 * @return ERROR_OK on success, else a fault code.
1661 */
1662 int (*rom_table_entry)(int retval, int depth, unsigned int offset, uint64_t romentry,
1663 void *priv);
1664 /**
1665 * Private data
1666 */
1667 void *priv;
1668 };
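
/*
 * Minimal usage sketch (illustrative only; see dap_info_command() and
 * dap_lookup_cs_component() below for the in-tree users): set only the
 * callbacks of interest, leave the rest NULL, and start the walk from an
 * AP with rtp_ap():
 *
 *	struct rtp_ops ops = {
 *		.cs_component = my_cs_component_cb,	// hypothetical callback
 *		.priv = &my_data,			// hypothetical private data
 *	};
 *	int retval = rtp_ap(&ops, ap, 0);
 */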
1669
1670 /**
1671 * Wrapper around struct rtp_ops::ap_header.
1672 */
1673 static int rtp_ops_ap_header(const struct rtp_ops *ops,
1674 struct adiv5_ap *ap, int depth)
1675 {
1676 if (ops->ap_header)
1677 return ops->ap_header(ap, depth, ops->priv);
1678
1679 return ERROR_OK;
1680 }
1681
1682 /**
1683 * Wrapper around struct rtp_ops::mem_ap_header.
1684 * Input parameter @a retval is propagated.
1685 */
1686 static int rtp_ops_mem_ap_header(const struct rtp_ops *ops,
1687 int retval, struct adiv5_ap *ap, uint64_t dbgbase, uint32_t apid, int depth)
1688 {
1689 if (!ops->mem_ap_header)
1690 return retval;
1691
1692 int retval1 = ops->mem_ap_header(retval, ap, dbgbase, apid, depth, ops->priv);
1693 if (retval != ERROR_OK)
1694 return retval;
1695 return retval1;
1696 }
1697
1698 /**
1699 * Wrapper around struct rtp_ops::cs_component.
1700 * Input parameter @a retval is propagated.
1701 */
1702 static int rtp_ops_cs_component(const struct rtp_ops *ops,
1703 int retval, struct cs_component_vals *v, int depth)
1704 {
1705 if (!ops->cs_component)
1706 return retval;
1707
1708 int retval1 = ops->cs_component(retval, v, depth, ops->priv);
1709 if (retval != ERROR_OK)
1710 return retval;
1711 return retval1;
1712 }
1713
1714 /**
1715 * Wrapper around struct rtp_ops::rom_table_entry.
1716 * Input parameter @a retval is propagated.
1717 */
1718 static int rtp_ops_rom_table_entry(const struct rtp_ops *ops,
1719 int retval, int depth, unsigned int offset, uint64_t romentry)
1720 {
1721 if (!ops->rom_table_entry)
1722 return retval;
1723
1724 int retval1 = ops->rom_table_entry(retval, depth, offset, romentry, ops->priv);
1725 if (retval != ERROR_OK)
1726 return retval;
1727 return retval1;
1728 }
1729
1730 /* Broken ROM tables can have circular references. Stop after a while */
1731 #define ROM_TABLE_MAX_DEPTH (16)
1732
1733 /**
1734 * Value used only during lookup of a CoreSight component in ROM table.
1735 * Return CORESIGHT_COMPONENT_FOUND when component is found.
1736 * Return ERROR_OK when component is not found yet.
1737 * Return any other ERROR_* in case of error.
1738 */
1739 #define CORESIGHT_COMPONENT_FOUND (1)
1740
1741 static int rtp_ap(const struct rtp_ops *ops, struct adiv5_ap *ap, int depth);
1742 static int rtp_cs_component(enum coresight_access_mode mode, const struct rtp_ops *ops,
1743 struct adiv5_ap *ap, target_addr_t dbgbase, bool *is_mem_ap, int depth);
1744
1745 static int rtp_rom_loop(enum coresight_access_mode mode, const struct rtp_ops *ops,
1746 struct adiv5_ap *ap, target_addr_t base_address, int depth,
1747 unsigned int width, unsigned int max_entries)
1748 {
1749 /* An ADIv6 AP ROM table provides offsets from the current AP */
1750 if (mode == CS_ACCESS_AP)
1751 base_address = ap->ap_num;
1752
1753 assert(IS_ALIGNED(base_address, ARM_CS_ALIGN));
1754
1755 unsigned int offset = 0;
1756 while (max_entries--) {
1757 uint64_t romentry;
1758 uint32_t romentry_low, romentry_high;
1759 target_addr_t component_base;
1760 unsigned int saved_offset = offset;
1761
1762 int retval = dap_queue_read_reg(mode, ap, base_address, offset, &romentry_low);
1763 offset += 4;
1764 if (retval == ERROR_OK && width == 64) {
1765 retval = dap_queue_read_reg(mode, ap, base_address, offset, &romentry_high);
1766 offset += 4;
1767 }
1768 if (retval == ERROR_OK)
1769 retval = dap_run(ap->dap);
1770 if (retval != ERROR_OK) {
1771 LOG_DEBUG("Failed to read ROM table entry");
1772 return retval;
1773 }
1774
1775 if (width == 64) {
1776 romentry = (((uint64_t)romentry_high) << 32) | romentry_low;
1777 component_base = base_address +
1778 ((((uint64_t)romentry_high) << 32) | (romentry_low & ARM_CS_ROMENTRY_OFFSET_MASK));
1779 } else {
1780 romentry = romentry_low;
1781 /* "romentry" is signed */
1782 component_base = base_address + (int32_t)(romentry_low & ARM_CS_ROMENTRY_OFFSET_MASK);
1783 if (!is_64bit_ap(ap))
1784 component_base = (uint32_t)component_base;
1785 }
1786 retval = rtp_ops_rom_table_entry(ops, retval, depth, saved_offset, romentry);
1787 if (retval != ERROR_OK)
1788 return retval;
1789
1790 if (romentry == 0) {
1791 /* End of ROM table */
1792 break;
1793 }
1794
1795 if (!(romentry & ARM_CS_ROMENTRY_PRESENT))
1796 continue;
1797
1798 /* Recurse */
1799 if (mode == CS_ACCESS_AP) {
1800 struct adiv5_ap *next_ap = dap_get_ap(ap->dap, component_base);
1801 if (!next_ap) {
1802 LOG_DEBUG("Wrong AP # 0x%" PRIx64, component_base);
1803 continue;
1804 }
1805 retval = rtp_ap(ops, next_ap, depth + 1);
1806 dap_put_ap(next_ap);
1807 } else {
1808 /* mode == CS_ACCESS_MEM_AP */
1809 retval = rtp_cs_component(mode, ops, ap, component_base, NULL, depth + 1);
1810 }
1811 if (retval == CORESIGHT_COMPONENT_FOUND)
1812 return CORESIGHT_COMPONENT_FOUND;
1813 if (retval != ERROR_OK) {
1814 /* TODO: do we need to send an ABORT before continuing? */
1815 LOG_DEBUG("Ignore error parsing CoreSight component");
1816 continue;
1817 }
1818 }
1819
1820 return ERROR_OK;
1821 }
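
/*
 * Worked example of the 32-bit entry decoding above (illustrative values):
 * with a class 0x1 ROM table at base_address 0xE00FF000 (typical Cortex-M)
 * and a first entry of 0xFFF0F003, the PRESENT bit (bit 0) is set and the
 * offset field 0xFFF0F000 is sign-extended to -0xF1000, so the component
 * base becomes 0xE00FF000 - 0xF1000 = 0xE000E000, the System Control Space.
 */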
1822
1823 static int rtp_cs_component(enum coresight_access_mode mode, const struct rtp_ops *ops,
1824 struct adiv5_ap *ap, target_addr_t base_address, bool *is_mem_ap, int depth)
1825 {
1826 struct cs_component_vals v;
1827 int retval;
1828
1829 assert(IS_ALIGNED(base_address, ARM_CS_ALIGN));
1830
1831 if (is_mem_ap)
1832 *is_mem_ap = false;
1833
1834 if (depth > ROM_TABLE_MAX_DEPTH)
1835 retval = ERROR_FAIL;
1836 else
1837 retval = rtp_read_cs_regs(mode, ap, base_address, &v);
1838
1839 retval = rtp_ops_cs_component(ops, retval, &v, depth);
1840 if (retval == CORESIGHT_COMPONENT_FOUND)
1841 return CORESIGHT_COMPONENT_FOUND;
1842 if (retval != ERROR_OK)
1843 return ERROR_OK; /* Don't abort recursion */
1844
1845 if (!is_valid_arm_cs_cidr(v.cid))
1846 return ERROR_OK; /* Don't abort recursion */
1847
1848 const unsigned int class = ARM_CS_CIDR_CLASS(v.cid);
1849
1850 if (class == ARM_CS_CLASS_0X1_ROM_TABLE)
1851 return rtp_rom_loop(mode, ops, ap, base_address, depth, 32, 960);
1852
1853 if (class == ARM_CS_CLASS_0X9_CS_COMPONENT) {
1854 if ((v.devarch & ARM_CS_C9_DEVARCH_PRESENT) == 0)
1855 return ERROR_OK;
1856
1857 if (is_mem_ap) {
1858 if ((v.devarch & DEVARCH_ID_MASK) == DEVARCH_MEM_AP)
1859 *is_mem_ap = true;
1860
1861 /* SoC-600 APv1 Adapter */
1862 if ((v.devarch & DEVARCH_ID_MASK) == DEVARCH_UNKNOWN_V2 &&
1863 ARM_CS_PIDR_DESIGNER(v.pid) == ARM_ID &&
1864 ARM_CS_PIDR_PART(v.pid) == 0x9e5)
1865 *is_mem_ap = true;
1866 }
1867
1868 /* quit if not ROM table */
1869 if ((v.devarch & DEVARCH_ID_MASK) != DEVARCH_ROM_C_0X9)
1870 return ERROR_OK;
1871
1872 if ((v.devid & ARM_CS_C9_DEVID_FORMAT_MASK) == ARM_CS_C9_DEVID_FORMAT_64BIT)
1873 return rtp_rom_loop(mode, ops, ap, base_address, depth, 64, 256);
1874 else
1875 return rtp_rom_loop(mode, ops, ap, base_address, depth, 32, 512);
1876 }
1877
1878 /* Class other than 0x1 and 0x9 */
1879 return ERROR_OK;
1880 }
1881
1882 static int rtp_ap(const struct rtp_ops *ops, struct adiv5_ap *ap, int depth)
1883 {
1884 uint32_t apid;
1885 target_addr_t dbgbase, invalid_entry;
1886
1887 int retval = rtp_ops_ap_header(ops, ap, depth);
1888 if (retval != ERROR_OK || depth > ROM_TABLE_MAX_DEPTH)
1889 return ERROR_OK; /* Don't abort recursion */
1890
1891 if (is_adiv6(ap->dap)) {
1892 bool is_mem_ap;
1893 retval = rtp_cs_component(CS_ACCESS_AP, ops, ap, 0, &is_mem_ap, depth);
1894 if (retval == CORESIGHT_COMPONENT_FOUND)
1895 return CORESIGHT_COMPONENT_FOUND;
1896 if (retval != ERROR_OK)
1897 return ERROR_OK; /* Don't abort recursion */
1898
1899 if (!is_mem_ap)
1900 return ERROR_OK;
1901 /* Continue for an ADIv6 MEM-AP or SoC-600 APv1 Adapter */
1902 }
1903
1904 /* Now we read ROM table ID registers, ref. ARM IHI 0029B sec */
1905 retval = dap_get_debugbase(ap, &dbgbase, &apid);
1906 if (retval != ERROR_OK)
1907 return retval;
1908 retval = rtp_ops_mem_ap_header(ops, retval, ap, dbgbase, apid, depth);
1909 if (retval != ERROR_OK)
1910 return retval;
1911
1912 if (apid == 0)
1913 return ERROR_FAIL;
1914
1915 /* NOTE: a MEM-AP may have a single CoreSight component that's
1916 * not a ROM table ... or have no such components at all.
1917 */
1918 const unsigned int class = (apid & AP_REG_IDR_CLASS_MASK) >> AP_REG_IDR_CLASS_SHIFT;
1919
1920 if (class == AP_REG_IDR_CLASS_MEM_AP) {
1921 if (is_64bit_ap(ap))
1922 invalid_entry = 0xFFFFFFFFFFFFFFFFull;
1923 else
1924 invalid_entry = 0xFFFFFFFFul;
1925
1926 if (dbgbase != invalid_entry && (dbgbase & 0x3) != 0x2) {
1927 retval = rtp_cs_component(CS_ACCESS_MEM_AP, ops, ap,
1928 dbgbase & 0xFFFFFFFFFFFFF000ull, NULL, depth);
1929 if (retval == CORESIGHT_COMPONENT_FOUND)
1930 return CORESIGHT_COMPONENT_FOUND;
1931 }
1932 }
1933
1934 return ERROR_OK;
1935 }
1936
1937 /* Actions for command "dap info" */
1938
1939 static int dap_info_ap_header(struct adiv5_ap *ap, int depth, void *priv)
1940 {
1941 struct command_invocation *cmd = priv;
1942
1943 if (depth > ROM_TABLE_MAX_DEPTH) {
1944 command_print(cmd, "\tTables too deep");
1945 return ERROR_FAIL;
1946 }
1947
1948 command_print(cmd, "%sAP # 0x%" PRIx64, (depth) ? "\t\t" : "", ap->ap_num);
1949 return ERROR_OK;
1950 }
1951
1952 static int dap_info_mem_ap_header(int retval, struct adiv5_ap *ap,
1953 target_addr_t dbgbase, uint32_t apid, int depth, void *priv)
1954 {
1955 struct command_invocation *cmd = priv;
1956 target_addr_t invalid_entry;
1957 char tabs[17] = "";
1958
1959 if (retval != ERROR_OK) {
1960 command_print(cmd, "\t\tCan't read MEM-AP, the corresponding core might be turned off");
1961 return retval;
1962 }
1963
1964 if (depth > ROM_TABLE_MAX_DEPTH) {
1965 command_print(cmd, "\tTables too deep");
1966 return ERROR_FAIL;
1967 }
1968
1969 if (depth)
1970 snprintf(tabs, sizeof(tabs), "\t[L%02d] ", depth);
1971
1972 command_print(cmd, "\t\tAP ID register 0x%8.8" PRIx32, apid);
1973 if (apid == 0) {
1974 command_print(cmd, "\t\tNo AP found at this AP#0x%" PRIx64, ap->ap_num);
1975 return ERROR_FAIL;
1976 }
1977
1978 command_print(cmd, "\t\tType is %s", ap_type_to_description(apid & AP_TYPE_MASK));
1979
1980 /* NOTE: a MEM-AP may have a single CoreSight component that's
1981 * not a ROM table ... or have no such components at all.
1982 */
1983 const unsigned int class = (apid & AP_REG_IDR_CLASS_MASK) >> AP_REG_IDR_CLASS_SHIFT;
1984
1985 if (class == AP_REG_IDR_CLASS_MEM_AP) {
1986 if (is_64bit_ap(ap))
1987 invalid_entry = 0xFFFFFFFFFFFFFFFFull;
1988 else
1989 invalid_entry = 0xFFFFFFFFul;
1990
1991 command_print(cmd, "%sMEM-AP BASE " TARGET_ADDR_FMT, tabs, dbgbase);
1992
1993 if (dbgbase == invalid_entry || (dbgbase & 0x3) == 0x2) {
1994 command_print(cmd, "\t\tNo ROM table present");
1995 } else {
1996 if (dbgbase & 0x01)
1997 command_print(cmd, "\t\tValid ROM table present");
1998 else
1999 command_print(cmd, "\t\tROM table in legacy format");
2000 }
2001 }
2002
2003 return ERROR_OK;
2004 }
2005
2006 static int dap_info_cs_component(int retval, struct cs_component_vals *v, int depth, void *priv)
2007 {
2008 struct command_invocation *cmd = priv;
2009
2010 if (depth > ROM_TABLE_MAX_DEPTH) {
2011 command_print(cmd, "\tTables too deep");
2012 return ERROR_FAIL;
2013 }
2014
2015 if (v->mode == CS_ACCESS_MEM_AP)
2016 command_print(cmd, "\t\tComponent base address " TARGET_ADDR_FMT, v->component_base);
2017
2018 if (retval != ERROR_OK) {
2019 command_print(cmd, "\t\tCan't read component, the corresponding core might be turned off");
2020 return retval;
2021 }
2022
2023 if (!is_valid_arm_cs_cidr(v->cid)) {
2024 command_print(cmd, "\t\tInvalid CID 0x%08" PRIx32, v->cid);
2025 return ERROR_OK; /* Don't abort recursion */
2026 }
2027
2028 /* component may take multiple 4K pages */
2029 uint32_t size = ARM_CS_PIDR_SIZE(v->pid);
2030 if (size > 0)
2031 command_print(cmd, "\t\tStart address " TARGET_ADDR_FMT, v->component_base - 0x1000 * size);
2032
2033 command_print(cmd, "\t\tPeripheral ID 0x%010" PRIx64, v->pid);
2034
2035 const unsigned int part_num = ARM_CS_PIDR_PART(v->pid);
2036 unsigned int designer_id = ARM_CS_PIDR_DESIGNER(v->pid);
2037
2038 if (v->pid & ARM_CS_PIDR_JEDEC) {
2039 /* JEP106 code */
2040 command_print(cmd, "\t\tDesigner is 0x%03x, %s",
2041 designer_id, jep106_manufacturer(designer_id));
2042 } else {
2043 /* Legacy ASCII ID, clear invalid bits */
2044 designer_id &= 0x7f;
2045 command_print(cmd, "\t\tDesigner ASCII code 0x%02x, %s",
2046 designer_id, designer_id == 0x41 ? "ARM" : "<unknown>");
2047 }
2048
2049 const struct dap_part_nums *partnum = pidr_to_part_num(designer_id, part_num);
2050 command_print(cmd, "\t\tPart is 0x%03x, %s %s", part_num, partnum->type, partnum->full);
2051
2052 const unsigned int class = ARM_CS_CIDR_CLASS(v->cid);
2053 command_print(cmd, "\t\tComponent class is 0x%x, %s", class, class_description[class]);
2054
2055 if (class == ARM_CS_CLASS_0X1_ROM_TABLE) {
2056 if (v->devtype_memtype & ARM_CS_C1_MEMTYPE_SYSMEM_MASK)
2057 command_print(cmd, "\t\tMEMTYPE system memory present on bus");
2058 else
2059 command_print(cmd, "\t\tMEMTYPE system memory not present: dedicated debug bus");
2060 return ERROR_OK;
2061 }
2062
2063 if (class == ARM_CS_CLASS_0X9_CS_COMPONENT) {
2064 dap_devtype_display(cmd, v->devtype_memtype);
2065
2066 /* REVISIT also show ARM_CS_C9_DEVID */
2067
2068 if ((v->devarch & ARM_CS_C9_DEVARCH_PRESENT) == 0)
2069 return ERROR_OK;
2070
2071 unsigned int architect_id = ARM_CS_C9_DEVARCH_ARCHITECT(v->devarch);
2072 unsigned int revision = ARM_CS_C9_DEVARCH_REVISION(v->devarch);
2073 command_print(cmd, "\t\tDev Arch is 0x%08" PRIx32 ", %s \"%s\" rev.%u", v->devarch,
2074 jep106_manufacturer(architect_id), class0x9_devarch_description(v->devarch),
2075 revision);
2076
2077 if ((v->devarch & DEVARCH_ID_MASK) == DEVARCH_ROM_C_0X9) {
2078 command_print(cmd, "\t\tType is ROM table");
2079
2080 if (v->devid & ARM_CS_C9_DEVID_SYSMEM_MASK)
2081 command_print(cmd, "\t\tMEMTYPE system memory present on bus");
2082 else
2083 command_print(cmd, "\t\tMEMTYPE system memory not present: dedicated debug bus");
2084 }
2085 return ERROR_OK;
2086 }
2087
2088 /* Class other than 0x1 and 0x9 */
2089 return ERROR_OK;
2090 }
2091
2092 static int dap_info_rom_table_entry(int retval, int depth,
2093 unsigned int offset, uint64_t romentry, void *priv)
2094 {
2095 struct command_invocation *cmd = priv;
2096 char tabs[16] = "";
2097
2098 if (depth)
2099 snprintf(tabs, sizeof(tabs), "[L%02d] ", depth);
2100
2101 if (retval != ERROR_OK) {
2102 command_print(cmd, "\t%sROMTABLE[0x%x] Read error", tabs, offset);
2103 command_print(cmd, "\t\tUnable to continue");
2104 command_print(cmd, "\t%s\tStop parsing of ROM table", tabs);
2105 return retval;
2106 }
2107
2108 command_print(cmd, "\t%sROMTABLE[0x%x] = 0x%08" PRIx64,
2109 tabs, offset, romentry);
2110
2111 if (romentry == 0) {
2112 command_print(cmd, "\t%s\tEnd of ROM table", tabs);
2113 return ERROR_OK;
2114 }
2115
2116 if (!(romentry & ARM_CS_ROMENTRY_PRESENT)) {
2117 command_print(cmd, "\t\tComponent not present");
2118 return ERROR_OK;
2119 }
2120
2121 return ERROR_OK;
2122 }
2123
2124 int dap_info_command(struct command_invocation *cmd, struct adiv5_ap *ap)
2125 {
2126 struct rtp_ops dap_info_ops = {
2127 .ap_header = dap_info_ap_header,
2128 .mem_ap_header = dap_info_mem_ap_header,
2129 .cs_component = dap_info_cs_component,
2130 .rom_table_entry = dap_info_rom_table_entry,
2131 .priv = cmd,
2132 };
2133
2134 return rtp_ap(&dap_info_ops, ap, 0);
2135 }
2136
2137 /* Actions for dap_lookup_cs_component() */
2138
2139 struct dap_lookup_data {
2140 /* input */
2141 unsigned int idx;
2142 unsigned int type;
2143 /* output */
2144 uint64_t component_base;
2145 uint64_t ap_num;
2146 };
2147
2148 static int dap_lookup_cs_component_cs_component(int retval,
2149 struct cs_component_vals *v, int depth, void *priv)
2150 {
2151 struct dap_lookup_data *lookup = priv;
2152
2153 if (retval != ERROR_OK)
2154 return retval;
2155
2156 if (!is_valid_arm_cs_cidr(v->cid))
2157 return ERROR_OK;
2158
2159 const unsigned int class = ARM_CS_CIDR_CLASS(v->cid);
2160 if (class != ARM_CS_CLASS_0X9_CS_COMPONENT)
2161 return ERROR_OK;
2162
2163 if ((v->devtype_memtype & ARM_CS_C9_DEVTYPE_MASK) != lookup->type)
2164 return ERROR_OK;
2165
2166 if (lookup->idx) {
2167 /* search for next one */
2168 --lookup->idx;
2169 return ERROR_OK;
2170 }
2171
2172 /* Found! */
2173 lookup->component_base = v->component_base;
2174 lookup->ap_num = v->ap->ap_num;
2175 return CORESIGHT_COMPONENT_FOUND;
2176 }
2177
2178 int dap_lookup_cs_component(struct adiv5_ap *ap, uint8_t type,
2179 target_addr_t *addr, int32_t core_id)
2180 {
2181 struct dap_lookup_data lookup = {
2182 .type = type,
2183 .idx = core_id,
2184 };
2185 struct rtp_ops dap_lookup_cs_component_ops = {
2186 .ap_header = NULL,
2187 .mem_ap_header = NULL,
2188 .cs_component = dap_lookup_cs_component_cs_component,
2189 .rom_table_entry = NULL,
2190 .priv = &lookup,
2191 };
2192
2193 int retval = rtp_ap(&dap_lookup_cs_component_ops, ap, 0);
2194 if (retval == CORESIGHT_COMPONENT_FOUND) {
2195 if (lookup.ap_num != ap->ap_num) {
2196 /* TODO: handle search from root ROM table */
2197 LOG_DEBUG("CS lookup ended in AP # 0x%" PRIx64 ". Ignore it", lookup.ap_num);
2198 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2199 }
2200 LOG_DEBUG("CS lookup found at 0x%" PRIx64, lookup.component_base);
2201 *addr = lookup.component_base;
2202 return ERROR_OK;
2203 }
2204 if (retval != ERROR_OK) {
2205 LOG_DEBUG("CS lookup error %d", retval);
2206 return retval;
2207 }
2208 LOG_DEBUG("CS lookup not found");
2209 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2210 }
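
/*
 * Illustrative call (hypothetical caller): to get the address of the second
 * "Debug Logic, Processor" component (DEVTYPE 0x15, i.e. a core debug unit)
 * reachable from a MEM-AP:
 *
 *	target_addr_t addr;
 *	int retval = dap_lookup_cs_component(ap, 0x15, &addr, 1);
 */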
2211
2212 enum adiv5_cfg_param {
2213 CFG_DAP,
2214 CFG_AP_NUM,
2215 CFG_BASEADDR,
2216 CFG_CTIBASE, /* DEPRECATED */
2217 };
2218
2219 static const struct jim_nvp nvp_config_opts[] = {
2220 { .name = "-dap", .value = CFG_DAP },
2221 { .name = "-ap-num", .value = CFG_AP_NUM },
2222 { .name = "-baseaddr", .value = CFG_BASEADDR },
2223 { .name = "-ctibase", .value = CFG_CTIBASE }, /* DEPRECATED */
2224 { .name = NULL, .value = -1 }
2225 };
2226
2227 static int adiv5_jim_spot_configure(struct jim_getopt_info *goi,
2228 struct adiv5_dap **dap_p, uint64_t *ap_num_p, uint32_t *base_p)
2229 {
2230 assert(dap_p && ap_num_p);
2231
2232 if (!goi->argc)
2233 return JIM_OK;
2234
2235 Jim_SetEmptyResult(goi->interp);
2236
2237 struct jim_nvp *n;
2238 int e = jim_nvp_name2value_obj(goi->interp, nvp_config_opts,
2239 goi->argv[0], &n);
2240 if (e != JIM_OK)
2241 return JIM_CONTINUE;
2242
2243 /* base_p can be NULL; in that case the '-baseaddr' and '-ctibase' options are treated as unknown */
2244 if (!base_p && (n->value == CFG_BASEADDR || n->value == CFG_CTIBASE))
2245 return JIM_CONTINUE;
2246
2247 e = jim_getopt_obj(goi, NULL);
2248 if (e != JIM_OK)
2249 return e;
2250
2251 switch (n->value) {
2252 case CFG_DAP:
2253 if (goi->isconfigure) {
2254 Jim_Obj *o_t;
2255 struct adiv5_dap *dap;
2256 e = jim_getopt_obj(goi, &o_t);
2257 if (e != JIM_OK)
2258 return e;
2259 dap = dap_instance_by_jim_obj(goi->interp, o_t);
2260 if (!dap) {
2261 Jim_SetResultString(goi->interp, "DAP name invalid!", -1);
2262 return JIM_ERR;
2263 }
2264 if (*dap_p && *dap_p != dap) {
2265 Jim_SetResultString(goi->interp,
2266 "DAP assignment cannot be changed!", -1);
2267 return JIM_ERR;
2268 }
2269 *dap_p = dap;
2270 } else {
2271 if (goi->argc)
2272 goto err_no_param;
2273 if (!*dap_p) {
2274 Jim_SetResultString(goi->interp, "DAP not configured", -1);
2275 return JIM_ERR;
2276 }
2277 Jim_SetResultString(goi->interp, adiv5_dap_name(*dap_p), -1);
2278 }
2279 break;
2280
2281 case CFG_AP_NUM:
2282 if (goi->isconfigure) {
2283 /* jim_wide is a signed 64-bit int; ap_num is unsigned with at most 52 bits */
2284 jim_wide ap_num;
2285 e = jim_getopt_wide(goi, &ap_num);
2286 if (e != JIM_OK)
2287 return e;
2288 /* we still don't know dap->adi_version */
2289 if (ap_num < 0 || (ap_num > DP_APSEL_MAX && (ap_num & 0xfff))) {
2290 Jim_SetResultString(goi->interp, "Invalid AP number!", -1);
2291 return JIM_ERR;
2292 }
2293 *ap_num_p = ap_num;
2294 } else {
2295 if (goi->argc)
2296 goto err_no_param;
2297 if (*ap_num_p == DP_APSEL_INVALID) {
2298 Jim_SetResultString(goi->interp, "AP number not configured", -1);
2299 return JIM_ERR;
2300 }
2301 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, *ap_num_p));
2302 }
2303 break;
2304
2305 case CFG_CTIBASE:
2306 LOG_WARNING("DEPRECATED! use \'-baseaddr\' not \'-ctibase\'");
2307 /* fall through */
2308 case CFG_BASEADDR:
2309 if (goi->isconfigure) {
2310 jim_wide base;
2311 e = jim_getopt_wide(goi, &base);
2312 if (e != JIM_OK)
2313 return e;
2314 *base_p = (uint32_t)base;
2315 } else {
2316 if (goi->argc)
2317 goto err_no_param;
2318 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, *base_p));
2319 }
2320 break;
2321 }
2322
2323 return JIM_OK;
2324
2325 err_no_param:
2326 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "NO PARAMS");
2327 return JIM_ERR;
2328 }
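
/*
 * These options surface in Tcl configuration. For instance a CTI, which goes
 * through adiv5_jim_mem_ap_spot_configure() below, is typically created with
 * a line like (illustrative values):
 *
 *	cti create $_CHIPNAME.cti -dap $_CHIPNAME.dap -ap-num 0 -baseaddr 0x80001000
 */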
2329
2330 int adiv5_jim_configure(struct target *target, struct jim_getopt_info *goi)
2331 {
2332 struct adiv5_private_config *pc;
2333 int e;
2334
2335 pc = (struct adiv5_private_config *)target->private_config;
2336 if (!pc) {
2337 pc = calloc(1, sizeof(struct adiv5_private_config));
2338 if (!pc) {
2339 LOG_ERROR("Out of memory");
2340 return JIM_ERR;
2341 }
2342 pc->ap_num = DP_APSEL_INVALID;
2343 target->private_config = pc;
2344 }
2345
2346 target->has_dap = true;
2347
2348 e = adiv5_jim_spot_configure(goi, &pc->dap, &pc->ap_num, NULL);
2349 if (e != JIM_OK)
2350 return e;
2351
2352 if (pc->dap && !target->dap_configured) {
2353 if (target->tap_configured) {
2354 pc->dap = NULL;
2355 Jim_SetResultString(goi->interp,
2356 "-chain-position and -dap configparams are mutually exclusive!", -1);
2357 return JIM_ERR;
2358 }
2359 target->tap = pc->dap->tap;
2360 target->dap_configured = true;
2361 }
2362
2363 return JIM_OK;
2364 }
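
/*
 * This is the hook behind target configuration lines such as (illustrative):
 *
 *	target create $_CHIPNAME.cpu cortex_m -dap $_CHIPNAME.dap -ap-num 0
 *
 * It also enforces that -dap and -chain-position are mutually exclusive.
 */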
2365
2366 int adiv5_verify_config(struct adiv5_private_config *pc)
2367 {
2368 if (!pc)
2369 return ERROR_FAIL;
2370
2371 if (!pc->dap)
2372 return ERROR_FAIL;
2373
2374 return ERROR_OK;
2375 }
2376
2377 int adiv5_jim_mem_ap_spot_configure(struct adiv5_mem_ap_spot *cfg,
2378 struct jim_getopt_info *goi)
2379 {
2380 return adiv5_jim_spot_configure(goi, &cfg->dap, &cfg->ap_num, &cfg->base);
2381 }
2382
2383 int adiv5_mem_ap_spot_init(struct adiv5_mem_ap_spot *p)
2384 {
2385 p->dap = NULL;
2386 p->ap_num = DP_APSEL_INVALID;
2387 p->base = 0;
2388 return ERROR_OK;
2389 }
2390
2391 COMMAND_HANDLER(handle_dap_info_command)
2392 {
2393 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2394 uint64_t apsel;
2395
2396 switch (CMD_ARGC) {
2397 case 0:
2398 apsel = dap->apsel;
2399 break;
2400 case 1:
2401 if (!strcmp(CMD_ARGV[0], "root")) {
2402 if (!is_adiv6(dap)) {
2403 command_print(CMD, "Option \"root\" not allowed with ADIv5 DAP");
2404 return ERROR_COMMAND_ARGUMENT_INVALID;
2405 }
2406 int retval = adiv6_dap_read_baseptr(CMD, dap, &apsel);
2407 if (retval != ERROR_OK) {
2408 command_print(CMD, "Failed reading DAP baseptr");
2409 return retval;
2410 }
2411 break;
2412 }
2413 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2414 if (!is_ap_num_valid(dap, apsel)) {
2415 command_print(CMD, "Invalid AP number");
2416 return ERROR_COMMAND_ARGUMENT_INVALID;
2417 }
2418 break;
2419 default:
2420 return ERROR_COMMAND_SYNTAX_ERROR;
2421 }
2422
2423 struct adiv5_ap *ap = dap_get_ap(dap, apsel);
2424 if (!ap) {
2425 command_print(CMD, "Cannot get AP");
2426 return ERROR_FAIL;
2427 }
2428
2429 int retval = dap_info_command(CMD, ap);
2430 dap_put_ap(ap);
2431 return retval;
2432 }
2433
2434 COMMAND_HANDLER(dap_baseaddr_command)
2435 {
2436 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2437 uint64_t apsel;
2438 uint32_t baseaddr_lower, baseaddr_upper;
2439 struct adiv5_ap *ap;
2440 target_addr_t baseaddr;
2441 int retval;
2442
2443 baseaddr_upper = 0;
2444
2445 switch (CMD_ARGC) {
2446 case 0:
2447 apsel = dap->apsel;
2448 break;
2449 case 1:
2450 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2451 if (!is_ap_num_valid(dap, apsel)) {
2452 command_print(CMD, "Invalid AP number");
2453 return ERROR_COMMAND_ARGUMENT_INVALID;
2454 }
2455 break;
2456 default:
2457 return ERROR_COMMAND_SYNTAX_ERROR;
2458 }
2459
2460 /* NOTE: assumes we're talking to a MEM-AP, which
2461 * has a base address. There are other kinds of AP,
2462 * though they're not common for now. This should
2463 * use the ID register to verify it's a MEM-AP.
2464 */
2465
2466 ap = dap_get_ap(dap, apsel);
2467 if (!ap) {
2468 command_print(CMD, "Cannot get AP");
2469 return ERROR_FAIL;
2470 }
2471
2472 retval = dap_queue_ap_read(ap, MEM_AP_REG_BASE(dap), &baseaddr_lower);
2473
2474 if (retval == ERROR_OK && ap->cfg_reg == MEM_AP_REG_CFG_INVALID)
2475 retval = dap_queue_ap_read(ap, MEM_AP_REG_CFG(dap), &ap->cfg_reg);
2476
2477 if (retval == ERROR_OK && (ap->cfg_reg == MEM_AP_REG_CFG_INVALID || is_64bit_ap(ap))) {
2478 /* MEM_AP_REG_BASE64 is defined as 'RES0'; it can be read and then ignored on a 32-bit AP */
2479 retval = dap_queue_ap_read(ap, MEM_AP_REG_BASE64(dap), &baseaddr_upper);
2480 }
2481
2482 if (retval == ERROR_OK)
2483 retval = dap_run(dap);
2484 dap_put_ap(ap);
2485 if (retval != ERROR_OK)
2486 return retval;
2487
2488 if (is_64bit_ap(ap)) {
2489 baseaddr = (((target_addr_t)baseaddr_upper) << 32) | baseaddr_lower;
2490 command_print(CMD, "0x%016" PRIx64, baseaddr);
2491 } else
2492 command_print(CMD, "0x%08" PRIx32, baseaddr_lower);
2493
2494 return ERROR_OK;
2495 }
2496
2497 COMMAND_HANDLER(dap_memaccess_command)
2498 {
2499 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2500 struct adiv5_ap *ap;
2501 uint32_t memaccess_tck;
2502
2503 switch (CMD_ARGC) {
2504 case 0:
2505 ap = dap_get_ap(dap, dap->apsel);
2506 if (!ap) {
2507 command_print(CMD, "Cannot get AP");
2508 return ERROR_FAIL;
2509 }
2510 memaccess_tck = ap->memaccess_tck;
2511 break;
2512 case 1:
2513 ap = dap_get_config_ap(dap, dap->apsel);
2514 if (!ap) {
2515 command_print(CMD, "Cannot get AP");
2516 return ERROR_FAIL;
2517 }
2518 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], memaccess_tck);
2519 ap->memaccess_tck = memaccess_tck;
2520 break;
2521 default:
2522 return ERROR_COMMAND_SYNTAX_ERROR;
2523 }
2524
2525 dap_put_ap(ap);
2526
2527 command_print(CMD, "memory bus access delay set to %" PRIu32 " tck",
2528 memaccess_tck);
2529
2530 return ERROR_OK;
2531 }
2532
2533 COMMAND_HANDLER(dap_apsel_command)
2534 {
2535 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2536 uint64_t apsel;
2537
2538 switch (CMD_ARGC) {
2539 case 0:
2540 command_print(CMD, "0x%" PRIx64, dap->apsel);
2541 return ERROR_OK;
2542 case 1:
2543 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2544 if (!is_ap_num_valid(dap, apsel)) {
2545 command_print(CMD, "Invalid AP number");
2546 return ERROR_COMMAND_ARGUMENT_INVALID;
2547 }
2548 break;
2549 default:
2550 return ERROR_COMMAND_SYNTAX_ERROR;
2551 }
2552
2553 dap->apsel = apsel;
2554 return ERROR_OK;
2555 }
2556
2557 COMMAND_HANDLER(dap_apcsw_command)
2558 {
2559 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2560 struct adiv5_ap *ap;
2561 uint32_t csw_val, csw_mask;
2562
2563 switch (CMD_ARGC) {
2564 case 0:
2565 ap = dap_get_ap(dap, dap->apsel);
2566 if (!ap) {
2567 command_print(CMD, "Cannot get AP");
2568 return ERROR_FAIL;
2569 }
2570 command_print(CMD, "AP#0x%" PRIx64 " selected, csw 0x%8.8" PRIx32,
2571 dap->apsel, ap->csw_default);
2572 break;
2573 case 1:
2574 if (strcmp(CMD_ARGV[0], "default") == 0)
2575 csw_val = CSW_AHB_DEFAULT;
2576 else
2577 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], csw_val);
2578
2579 if (csw_val & (CSW_SIZE_MASK | CSW_ADDRINC_MASK)) {
2580 LOG_ERROR("CSW value cannot include 'Size' and 'AddrInc' bit-fields");
2581 return ERROR_COMMAND_ARGUMENT_INVALID;
2582 }
2583 ap = dap_get_config_ap(dap, dap->apsel);
2584 if (!ap) {
2585 command_print(CMD, "Cannot get AP");
2586 return ERROR_FAIL;
2587 }
2588 ap->csw_default = csw_val;
2589 break;
2590 case 2:
2591 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], csw_val);
2592 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], csw_mask);
2593 if (csw_mask & (CSW_SIZE_MASK | CSW_ADDRINC_MASK)) {
2594 LOG_ERROR("CSW mask cannot include 'Size' and 'AddrInc' bit-fields");
2595 return ERROR_COMMAND_ARGUMENT_INVALID;
2596 }
2597 ap = dap_get_config_ap(dap, dap->apsel);
2598 if (!ap) {
2599 command_print(CMD, "Cannot get AP");
2600 return ERROR_FAIL;
2601 }
2602 ap->csw_default = (ap->csw_default & ~csw_mask) | (csw_val & csw_mask);
2603 break;
2604 default:
2605 return ERROR_COMMAND_SYNTAX_ERROR;
2606 }
2607 dap_put_ap(ap);
2608
2609 return ERROR_OK;
2610 }
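
/*
 * Usage notes, derived from the handler above (no new behavior):
 *	dap apcsw			show the current default CSW value
 *	dap apcsw default		restore CSW_AHB_DEFAULT
 *	dap apcsw $value $mask		change only the bits selected by $mask:
 *					csw_default = (csw_default & ~mask) | (value & mask)
 * Values touching the Size or AddrInc fields are rejected, since those
 * bit-fields are driven by the MEM-AP access code itself.
 */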
2611
2612
2613
2614 COMMAND_HANDLER(dap_apid_command)
2615 {
2616 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2617 uint64_t apsel;
2618 uint32_t apid;
2619 int retval;
2620
2621 switch (CMD_ARGC) {
2622 case 0:
2623 apsel = dap->apsel;
2624 break;
2625 case 1:
2626 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2627 if (!is_ap_num_valid(dap, apsel)) {
2628 command_print(CMD, "Invalid AP number");
2629 return ERROR_COMMAND_ARGUMENT_INVALID;
2630 }
2631 break;
2632 default:
2633 return ERROR_COMMAND_SYNTAX_ERROR;
2634 }
2635
2636 struct adiv5_ap *ap = dap_get_ap(dap, apsel);
2637 if (!ap) {
2638 command_print(CMD, "Cannot get AP");
2639 return ERROR_FAIL;
2640 }
2641 retval = dap_queue_ap_read(ap, AP_REG_IDR(dap), &apid);
2642 if (retval != ERROR_OK) {
2643 dap_put_ap(ap);
2644 return retval;
2645 }
2646 retval = dap_run(dap);
2647 dap_put_ap(ap);
2648 if (retval != ERROR_OK)
2649 return retval;
2650
2651 command_print(CMD, "0x%8.8" PRIx32, apid);
2652
2653 return retval;
2654 }
2655
2656 COMMAND_HANDLER(dap_apreg_command)
2657 {
2658 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2659 uint64_t apsel;
2660 uint32_t reg, value;
2661 int retval;
2662
2663 if (CMD_ARGC < 2 || CMD_ARGC > 3)
2664 return ERROR_COMMAND_SYNTAX_ERROR;
2665
2666 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2667 if (!is_ap_num_valid(dap, apsel)) {
2668 command_print(CMD, "Invalid AP number");
2669 return ERROR_COMMAND_ARGUMENT_INVALID;
2670 }
2671
2672 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], reg);
2673 if (is_adiv6(dap)) {
2674 if (reg >= 4096 || (reg & 3)) {
2675 command_print(CMD, "Invalid reg value (should be less than 4096 and 4-byte aligned)");
2676 return ERROR_COMMAND_ARGUMENT_INVALID;
2677 }
2678 } else { /* ADI version 5 */
2679 if (reg >= 256 || (reg & 3)) {
2680 command_print(CMD, "Invalid reg value (should be less than 256 and 4-byte aligned)");
2681 return ERROR_COMMAND_ARGUMENT_INVALID;
2682 }
2683 }
2684
2685 struct adiv5_ap *ap = dap_get_ap(dap, apsel);
2686 if (!ap) {
2687 command_print(CMD, "Cannot get AP");
2688 return ERROR_FAIL;
2689 }
2690
2691 if (CMD_ARGC == 3) {
2692 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], value);
2693 /* see if user supplied register address is a match for the CSW or TAR register */
2694 if (reg == MEM_AP_REG_CSW(dap)) {
2695 ap->csw_value = 0; /* invalid, in case write fails */
2696 retval = dap_queue_ap_write(ap, reg, value);
2697 if (retval == ERROR_OK)
2698 ap->csw_value = value;
2699 } else if (reg == MEM_AP_REG_TAR(dap)) {
2700 retval = dap_queue_ap_write(ap, reg, value);
2701 if (retval == ERROR_OK)
2702 ap->tar_value = (ap->tar_value & ~0xFFFFFFFFull) | value;
2703 else {
2704 /* To track independent writes to TAR and TAR64, two tar_valid flags */
2705 /* should be used. To keep it simple, tar_valid is only invalidated on a */
2706 /* write fail. This approach causes a later re-write of the TAR and TAR64 */
2707 /* if tar_valid is false. */
2708 ap->tar_valid = false;
2709 }
2710 } else if (reg == MEM_AP_REG_TAR64(dap)) {
2711 retval = dap_queue_ap_write(ap, reg, value);
2712 if (retval == ERROR_OK)
2713 ap->tar_value = (ap->tar_value & 0xFFFFFFFFull) | (((target_addr_t)value) << 32);
2714 else {
2715 /* See above comment for the MEM_AP_REG_TAR failed write case */
2716 ap->tar_valid = false;
2717 }
2718 } else {
2719 retval = dap_queue_ap_write(ap, reg, value);
2720 }
2721 } else {
2722 retval = dap_queue_ap_read(ap, reg, &value);
2723 }
2724 if (retval == ERROR_OK)
2725 retval = dap_run(dap);
2726
2727 dap_put_ap(ap);
2728
2729 if (retval != ERROR_OK)
2730 return retval;
2731
2732 if (CMD_ARGC == 2)
2733 command_print(CMD, "0x%08" PRIx32, value);
2734
2735 return retval;
2736 }
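
/*
 * Illustrative invocations, using ADIv5 register offsets: "dap apreg 0 0xFC"
 * reads the IDR of AP #0, while "dap apreg 0 0x04 0x20000000" writes the TAR
 * and, as handled above, also updates the cached ap->tar_value so later
 * MEM-AP accesses stay consistent.
 */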
2737
2738 COMMAND_HANDLER(dap_dpreg_command)
2739 {
2740 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2741 uint32_t reg, value;
2742 int retval;
2743
2744 if (CMD_ARGC < 1 || CMD_ARGC > 2)
2745 return ERROR_COMMAND_SYNTAX_ERROR;
2746
2747 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg);
2748 if (reg >= 256 || (reg & 3)) {
2749 command_print(CMD, "Invalid reg value (should be less than 256 and 4-byte aligned)");
2750 return ERROR_COMMAND_ARGUMENT_INVALID;
2751 }
2752
2753 if (CMD_ARGC == 2) {
2754 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
2755 retval = dap_queue_dp_write(dap, reg, value);
2756 } else {
2757 retval = dap_queue_dp_read(dap, reg, &value);
2758 }
2759 if (retval == ERROR_OK)
2760 retval = dap_run(dap);
2761
2762 if (retval != ERROR_OK)
2763 return retval;
2764
2765 if (CMD_ARGC == 1)
2766 command_print(CMD, "0x%08" PRIx32, value);
2767
2768 return retval;
2769 }
2770
2771 COMMAND_HANDLER(dap_ti_be_32_quirks_command)
2772 {
2773 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2774 return CALL_COMMAND_HANDLER(handle_command_parse_bool, &dap->ti_be_32_quirks,
2775 "TI BE-32 quirks mode");
2776 }
2777
2778 COMMAND_HANDLER(dap_nu_npcx_quirks_command)
2779 {
2780 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2781 return CALL_COMMAND_HANDLER(handle_command_parse_bool, &dap->nu_npcx_quirks,
2782 "Nuvoton NPCX quirks mode");
2783 }
2784
2785 const struct command_registration dap_instance_commands[] = {
2786 {
2787 .name = "info",
2788 .handler = handle_dap_info_command,
2789 .mode = COMMAND_EXEC,
2790 .help = "display ROM table for specified MEM-AP (default currently selected AP) "
2791 "or the ADIv6 root ROM table",
2792 .usage = "[ap_num | 'root']",
2793 },
2794 {
2795 .name = "apsel",
2796 .handler = dap_apsel_command,
2797 .mode = COMMAND_ANY,
2798 .help = "Set the currently selected AP (default 0) "
2799 "and display the result",
2800 .usage = "[ap_num]",
2801 },
2802 {
2803 .name = "apcsw",
2804 .handler = dap_apcsw_command,
2805 .mode = COMMAND_ANY,
2806 .help = "Set CSW default bits",
2807 .usage = "[value [mask]]",
2808 },
2809
2810 {
2811 .name = "apid",
2812 .handler = dap_apid_command,
2813 .mode = COMMAND_EXEC,
2814 .help = "return ID register from AP "
2815 "(default currently selected AP)",
2816 .usage = "[ap_num]",
2817 },
2818 {
2819 .name = "apreg",
2820 .handler = dap_apreg_command,
2821 .mode = COMMAND_EXEC,
2822 .help = "read/write a register from AP "
2823 "(reg is byte address of a word register, like 0 4 8...)",
2824 .usage = "ap_num reg [value]",
2825 },
2826 {
2827 .name = "dpreg",
2828 .handler = dap_dpreg_command,
2829 .mode = COMMAND_EXEC,
2830 .help = "read/write a register from DP "
2831 "(reg is byte address (bank << 4 | reg) of a word register, like 0 4 8...)",
2832 .usage = "reg [value]",
2833 },
2834 {
2835 .name = "baseaddr",
2836 .handler = dap_baseaddr_command,
2837 .mode = COMMAND_EXEC,
2838 .help = "return debug base address from MEM-AP "
2839 "(default currently selected AP)",
2840 .usage = "[ap_num]",
2841 },
2842 {
2843 .name = "memaccess",
2844 .handler = dap_memaccess_command,
2845 .mode = COMMAND_EXEC,
2846 .help = "set/get number of extra tck for MEM-AP memory "
2847 "bus access [0-255]",
2848 .usage = "[cycles]",
2849 },
2850 {
2851 .name = "ti_be_32_quirks",
2852 .handler = dap_ti_be_32_quirks_command,
2853 .mode = COMMAND_CONFIG,
2854 .help = "set/get quirks mode for TI TMS470/TMS570 processors",
2855 .usage = "[enable]",
2856 },
2857 {
2858 .name = "nu_npcx_quirks",
2859 .handler = dap_nu_npcx_quirks_command,
2860 .mode = COMMAND_CONFIG,
2861 .help = "set/get quirks mode for Nuvoton NPCX controllers",
2862 .usage = "[enable]",
2863 },
2864 COMMAND_REGISTRATION_DONE
2865 };
