jtag: use proper format with uint32_t
[openocd.git] / src / jtag / drivers / xlnx-pcie-xvc.c
1 /* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright (c) 2019 Google, LLC.
4 * Author: Moritz Fischer <moritzf@google.com>
5 */
6
7 #ifdef HAVE_CONFIG_H
8 #include "config.h"
9 #endif
10
11 #include <stdint.h>
12 #include <stdlib.h>
13 #include <math.h>
14 #include <unistd.h>
15 #include <linux/pci.h>
16
17 #include <jtag/interface.h>
18 #include <jtag/swd.h>
19 #include <jtag/commands.h>
20 #include <helper/replacements.h>
21 #include <helper/bits.h>
22
23 /* Available only from kernel v4.10 */
24 #ifndef PCI_CFG_SPACE_EXP_SIZE
25 #define PCI_CFG_SPACE_EXP_SIZE 4096
26 #endif
27
28 #define PCIE_EXT_CAP_LST 0x100
29
30 #define XLNX_XVC_EXT_CAP 0x00
31 #define XLNX_XVC_VSEC_HDR 0x04
32 #define XLNX_XVC_LEN_REG 0x0C
33 #define XLNX_XVC_TMS_REG 0x10
34 #define XLNX_XVC_TDx_REG 0x14
35
36 #define XLNX_XVC_CAP_SIZE 0x20
37 #define XLNX_XVC_VSEC_ID 0x8
38 #define XLNX_XVC_MAX_BITS 0x20
39
40 #define MASK_ACK(x) (((x) >> 9) & 0x7)
41 #define MASK_PAR(x) ((int)((x) & 0x1))
42
/* Driver state for one XVC-over-PCIe adapter. */
struct xlnx_pcie_xvc {
	int fd;          /* open fd of /sys/bus/pci/devices/<device>/config */
	unsigned offset; /* byte offset of the XVC extended capability in config space */
	char *device;    /* PCI device address supplied via xlnx_pcie_xvc_config */
};
48
49 static struct xlnx_pcie_xvc xlnx_pcie_xvc_state;
50 static struct xlnx_pcie_xvc *xlnx_pcie_xvc = &xlnx_pcie_xvc_state;
51
52 static int xlnx_pcie_xvc_read_reg(const int offset, uint32_t *val)
53 {
54 uint32_t res;
55 int err;
56
57 /* Note: This should be ok endianness-wise because by going
58 * through sysfs the kernel does the conversion in the config
59 * space accessor functions
60 */
61 err = pread(xlnx_pcie_xvc->fd, &res, sizeof(res),
62 xlnx_pcie_xvc->offset + offset);
63 if (err != sizeof(res)) {
64 LOG_ERROR("Failed to read offset %x", offset);
65 return ERROR_JTAG_DEVICE_ERROR;
66 }
67
68 if (val)
69 *val = res;
70
71 return ERROR_OK;
72 }
73
74 static int xlnx_pcie_xvc_write_reg(const int offset, const uint32_t val)
75 {
76 int err;
77
78 /* Note: This should be ok endianness-wise because by going
79 * through sysfs the kernel does the conversion in the config
80 * space accessor functions
81 */
82 err = pwrite(xlnx_pcie_xvc->fd, &val, sizeof(val),
83 xlnx_pcie_xvc->offset + offset);
84 if (err != sizeof(val)) {
85 LOG_ERROR("Failed to write offset: %x with value: %" PRIx32,
86 offset, val);
87 return ERROR_JTAG_DEVICE_ERROR;
88 }
89
90 return ERROR_OK;
91 }
92
93 static int xlnx_pcie_xvc_transact(size_t num_bits, uint32_t tms, uint32_t tdi,
94 uint32_t *tdo)
95 {
96 int err;
97
98 err = xlnx_pcie_xvc_write_reg(XLNX_XVC_LEN_REG, num_bits);
99 if (err != ERROR_OK)
100 return err;
101
102 err = xlnx_pcie_xvc_write_reg(XLNX_XVC_TMS_REG, tms);
103 if (err != ERROR_OK)
104 return err;
105
106 err = xlnx_pcie_xvc_write_reg(XLNX_XVC_TDx_REG, tdi);
107 if (err != ERROR_OK)
108 return err;
109
110 err = xlnx_pcie_xvc_read_reg(XLNX_XVC_TDx_REG, tdo);
111 if (err != ERROR_OK)
112 return err;
113
114 if (tdo)
115 LOG_DEBUG_IO("Transact num_bits: %zu, tms: %" PRIx32 ", tdi: %" PRIx32 ", tdo: %" PRIx32,
116 num_bits, tms, tdi, *tdo);
117 else
118 LOG_DEBUG_IO("Transact num_bits: %zu, tms: %" PRIx32 ", tdi: %" PRIx32 ", tdo: <null>",
119 num_bits, tms, tdi);
120 return ERROR_OK;
121 }
122
123 int xlnx_pcie_xvc_execute_stableclocks(struct jtag_command *cmd)
124 {
125 int tms = tap_get_state() == TAP_RESET ? 1 : 0;
126 size_t left = cmd->cmd.stableclocks->num_cycles;
127 size_t write;
128 int err;
129
130 LOG_DEBUG("stableclocks %i cycles", cmd->cmd.runtest->num_cycles);
131
132 while (left) {
133 write = MIN(XLNX_XVC_MAX_BITS, left);
134 err = xlnx_pcie_xvc_transact(write, tms, 0, NULL);
135 if (err != ERROR_OK)
136 return err;
137 left -= write;
138 };
139
140 return ERROR_OK;
141 }
142
143 static int xlnx_pcie_xvc_execute_statemove(size_t skip)
144 {
145 uint8_t tms_scan = tap_get_tms_path(tap_get_state(),
146 tap_get_end_state());
147 int tms_count = tap_get_tms_path_len(tap_get_state(),
148 tap_get_end_state());
149 int err;
150
151 LOG_DEBUG("statemove starting at (skip: %zu) %s end in %s", skip,
152 tap_state_name(tap_get_state()),
153 tap_state_name(tap_get_end_state()));
154
155
156 err = xlnx_pcie_xvc_transact(tms_count - skip, tms_scan >> skip, 0, NULL);
157 if (err != ERROR_OK)
158 return err;
159
160 tap_set_state(tap_get_end_state());
161
162 return ERROR_OK;
163 }
164
165 static int xlnx_pcie_xvc_execute_runtest(struct jtag_command *cmd)
166 {
167 int err = ERROR_OK;
168
169 LOG_DEBUG("runtest %i cycles, end in %i",
170 cmd->cmd.runtest->num_cycles,
171 cmd->cmd.runtest->end_state);
172
173 tap_state_t tmp_state = tap_get_end_state();
174
175 if (tap_get_state() != TAP_IDLE) {
176 tap_set_end_state(TAP_IDLE);
177 err = xlnx_pcie_xvc_execute_statemove(0);
178 if (err != ERROR_OK)
179 return err;
180 };
181
182 size_t left = cmd->cmd.runtest->num_cycles;
183 size_t write;
184
185 while (left) {
186 write = MIN(XLNX_XVC_MAX_BITS, left);
187 err = xlnx_pcie_xvc_transact(write, 0, 0, NULL);
188 if (err != ERROR_OK)
189 return err;
190 left -= write;
191 };
192
193 tap_set_end_state(tmp_state);
194 if (tap_get_state() != tap_get_end_state())
195 err = xlnx_pcie_xvc_execute_statemove(0);
196
197 return err;
198 }
199
200 static int xlnx_pcie_xvc_execute_pathmove(struct jtag_command *cmd)
201 {
202 size_t num_states = cmd->cmd.pathmove->num_states;
203 tap_state_t *path = cmd->cmd.pathmove->path;
204 int err = ERROR_OK;
205 size_t i;
206
207 LOG_DEBUG("pathmove: %i states, end in %i",
208 cmd->cmd.pathmove->num_states,
209 cmd->cmd.pathmove->path[cmd->cmd.pathmove->num_states - 1]);
210
211 for (i = 0; i < num_states; i++) {
212 if (path[i] == tap_state_transition(tap_get_state(), false)) {
213 err = xlnx_pcie_xvc_transact(1, 1, 0, NULL);
214 } else if (path[i] == tap_state_transition(tap_get_state(), true)) {
215 err = xlnx_pcie_xvc_transact(1, 0, 0, NULL);
216 } else {
217 LOG_ERROR("BUG: %s -> %s isn't a valid TAP transition.",
218 tap_state_name(tap_get_state()),
219 tap_state_name(path[i]));
220 err = ERROR_JTAG_QUEUE_FAILED;
221 }
222 if (err != ERROR_OK)
223 return err;
224 tap_set_state(path[i]);
225 }
226
227 tap_set_end_state(tap_get_state());
228
229 return ERROR_OK;
230 }
231
232 static int xlnx_pcie_xvc_execute_scan(struct jtag_command *cmd)
233 {
234 enum scan_type type = jtag_scan_type(cmd->cmd.scan);
235 tap_state_t saved_end_state = cmd->cmd.scan->end_state;
236 bool ir_scan = cmd->cmd.scan->ir_scan;
237 uint32_t tdi, tms, tdo;
238 uint8_t *buf, *rd_ptr;
239 int err, scan_size;
240 size_t write;
241 size_t left;
242
243 scan_size = jtag_build_buffer(cmd->cmd.scan, &buf);
244 rd_ptr = buf;
245 LOG_DEBUG("%s scan type %d %d bits; starts in %s end in %s",
246 (cmd->cmd.scan->ir_scan) ? "IR" : "DR", type, scan_size,
247 tap_state_name(tap_get_state()),
248 tap_state_name(cmd->cmd.scan->end_state));
249
250 /* If we're in TAP_DR_SHIFT state but need to do a IR_SCAN or
251 * vice-versa, do a statemove to corresponding other state, then restore
252 * end state
253 */
254 if (ir_scan && tap_get_state() != TAP_IRSHIFT) {
255 tap_set_end_state(TAP_IRSHIFT);
256 err = xlnx_pcie_xvc_execute_statemove(0);
257 if (err != ERROR_OK)
258 goto out_err;
259 tap_set_end_state(saved_end_state);
260 } else if (!ir_scan && (tap_get_state() != TAP_DRSHIFT)) {
261 tap_set_end_state(TAP_DRSHIFT);
262 err = xlnx_pcie_xvc_execute_statemove(0);
263 if (err != ERROR_OK)
264 goto out_err;
265 tap_set_end_state(saved_end_state);
266 }
267
268 left = scan_size;
269 while (left) {
270 write = MIN(XLNX_XVC_MAX_BITS, left);
271 /* the last TMS should be a 1, to leave the state */
272 tms = left <= XLNX_XVC_MAX_BITS ? BIT(write - 1) : 0;
273 tdi = (type != SCAN_IN) ? buf_get_u32(rd_ptr, 0, write) : 0;
274 err = xlnx_pcie_xvc_transact(write, tms, tdi, type != SCAN_OUT ?
275 &tdo : NULL);
276 if (err != ERROR_OK)
277 goto out_err;
278 left -= write;
279 if (type != SCAN_OUT)
280 buf_set_u32(rd_ptr, 0, write, tdo);
281 rd_ptr += sizeof(uint32_t);
282 };
283
284 err = jtag_read_buffer(buf, cmd->cmd.scan);
285 if (buf)
286 free(buf);
287
288 if (tap_get_state() != tap_get_end_state())
289 err = xlnx_pcie_xvc_execute_statemove(1);
290
291 return err;
292
293 out_err:
294 if (buf)
295 free(buf);
296 return err;
297 }
298
299 static void xlnx_pcie_xvc_execute_reset(struct jtag_command *cmd)
300 {
301 LOG_DEBUG("reset trst: %i srst: %i", cmd->cmd.reset->trst,
302 cmd->cmd.reset->srst);
303 }
304
305 static void xlnx_pcie_xvc_execute_sleep(struct jtag_command *cmd)
306 {
307 LOG_DEBUG("sleep %" PRIu32 "", cmd->cmd.sleep->us);
308 usleep(cmd->cmd.sleep->us);
309 }
310
311 static int xlnx_pcie_xvc_execute_tms(struct jtag_command *cmd)
312 {
313 const size_t num_bits = cmd->cmd.tms->num_bits;
314 const uint8_t *bits = cmd->cmd.tms->bits;
315 size_t left, write;
316 uint32_t tms;
317 int err;
318
319 LOG_DEBUG("execute tms %zu", num_bits);
320
321 left = num_bits;
322 while (left) {
323 write = MIN(XLNX_XVC_MAX_BITS, left);
324 tms = buf_get_u32(bits, 0, write);
325 err = xlnx_pcie_xvc_transact(write, tms, 0, NULL);
326 if (err != ERROR_OK)
327 return err;
328 left -= write;
329 bits += 4;
330 };
331
332 return ERROR_OK;
333 }
334
335 static int xlnx_pcie_xvc_execute_command(struct jtag_command *cmd)
336 {
337 LOG_DEBUG("%s: cmd->type: %u", __func__, cmd->type);
338 switch (cmd->type) {
339 case JTAG_STABLECLOCKS:
340 return xlnx_pcie_xvc_execute_stableclocks(cmd);
341 case JTAG_RUNTEST:
342 return xlnx_pcie_xvc_execute_runtest(cmd);
343 case JTAG_TLR_RESET:
344 tap_set_end_state(cmd->cmd.statemove->end_state);
345 return xlnx_pcie_xvc_execute_statemove(0);
346 case JTAG_PATHMOVE:
347 return xlnx_pcie_xvc_execute_pathmove(cmd);
348 case JTAG_SCAN:
349 return xlnx_pcie_xvc_execute_scan(cmd);
350 case JTAG_RESET:
351 xlnx_pcie_xvc_execute_reset(cmd);
352 break;
353 case JTAG_SLEEP:
354 xlnx_pcie_xvc_execute_sleep(cmd);
355 break;
356 case JTAG_TMS:
357 return xlnx_pcie_xvc_execute_tms(cmd);
358 default:
359 LOG_ERROR("BUG: Unknown JTAG command type encountered.");
360 return ERROR_JTAG_QUEUE_FAILED;
361 }
362
363 return ERROR_OK;
364 }
365
366 static int xlnx_pcie_xvc_execute_queue(void)
367 {
368 struct jtag_command *cmd = jtag_command_queue;
369 int ret;
370
371 while (cmd) {
372 ret = xlnx_pcie_xvc_execute_command(cmd);
373
374 if (ret != ERROR_OK)
375 return ret;
376
377 cmd = cmd->next;
378 }
379
380 return ERROR_OK;
381 }
382
383
384 static int xlnx_pcie_xvc_init(void)
385 {
386 char filename[PATH_MAX];
387 uint32_t cap, vh;
388 int err;
389
390 snprintf(filename, PATH_MAX, "/sys/bus/pci/devices/%s/config",
391 xlnx_pcie_xvc->device);
392 xlnx_pcie_xvc->fd = open(filename, O_RDWR | O_SYNC);
393 if (xlnx_pcie_xvc->fd < 0) {
394 LOG_ERROR("Failed to open device: %s", filename);
395 return ERROR_JTAG_INIT_FAILED;
396 }
397
398 LOG_INFO("Scanning PCIe device %s's for Xilinx XVC/PCIe ...",
399 xlnx_pcie_xvc->device);
400 /* Parse the PCIe extended capability list and try to find
401 * vendor specific header */
402 xlnx_pcie_xvc->offset = PCIE_EXT_CAP_LST;
403 while (xlnx_pcie_xvc->offset <= PCI_CFG_SPACE_EXP_SIZE - sizeof(cap) &&
404 xlnx_pcie_xvc->offset >= PCIE_EXT_CAP_LST) {
405 err = xlnx_pcie_xvc_read_reg(XLNX_XVC_EXT_CAP, &cap);
406 if (err != ERROR_OK)
407 return err;
408 LOG_DEBUG("Checking capability at 0x%x; id=0x%04" PRIx32 " version=0x%" PRIx32 " next=0x%" PRIx32,
409 xlnx_pcie_xvc->offset,
410 PCI_EXT_CAP_ID(cap),
411 PCI_EXT_CAP_VER(cap),
412 PCI_EXT_CAP_NEXT(cap));
413 if (PCI_EXT_CAP_ID(cap) == PCI_EXT_CAP_ID_VNDR) {
414 err = xlnx_pcie_xvc_read_reg(XLNX_XVC_VSEC_HDR, &vh);
415 if (err != ERROR_OK)
416 return err;
417 LOG_DEBUG("Checking possible match at 0x%x; id: 0x%" PRIx32 "; rev: 0x%" PRIx32 "; length: 0x%" PRIx32,
418 xlnx_pcie_xvc->offset,
419 PCI_VNDR_HEADER_ID(vh),
420 PCI_VNDR_HEADER_REV(vh),
421 PCI_VNDR_HEADER_LEN(vh));
422 if ((PCI_VNDR_HEADER_ID(vh) == XLNX_XVC_VSEC_ID) &&
423 (PCI_VNDR_HEADER_LEN(vh) == XLNX_XVC_CAP_SIZE))
424 break;
425 }
426 xlnx_pcie_xvc->offset = PCI_EXT_CAP_NEXT(cap);
427 }
428 if ((xlnx_pcie_xvc->offset > PCI_CFG_SPACE_EXP_SIZE - XLNX_XVC_CAP_SIZE) ||
429 xlnx_pcie_xvc->offset < PCIE_EXT_CAP_LST) {
430 close(xlnx_pcie_xvc->fd);
431 return ERROR_JTAG_INIT_FAILED;
432 }
433
434 LOG_INFO("Found Xilinx XVC/PCIe capability at offset: 0x%x", xlnx_pcie_xvc->offset);
435
436 return ERROR_OK;
437 }
438
439 static int xlnx_pcie_xvc_quit(void)
440 {
441 int err;
442
443 err = close(xlnx_pcie_xvc->fd);
444 if (err)
445 return err;
446
447 return ERROR_OK;
448 }
449
450 COMMAND_HANDLER(xlnx_pcie_xvc_handle_config_command)
451 {
452 if (CMD_ARGC < 1)
453 return ERROR_COMMAND_SYNTAX_ERROR;
454
455 /* we can't really free this in a safe manner, so at least
456 * limit the memory we're leaking by freeing the old one first
457 * before allocating a new one ...
458 */
459 if (xlnx_pcie_xvc->device)
460 free(xlnx_pcie_xvc->device);
461
462 xlnx_pcie_xvc->device = strdup(CMD_ARGV[0]);
463 return ERROR_OK;
464 }
465
/* Configuration commands registered for this adapter driver. */
static const struct command_registration xlnx_pcie_xvc_command_handlers[] = {
	{
		.name = "xlnx_pcie_xvc_config",
		.handler = xlnx_pcie_xvc_handle_config_command,
		.mode = COMMAND_CONFIG,
		.help = "Configure XVC/PCIe JTAG adapter",
		.usage = "device",
	},
	COMMAND_REGISTRATION_DONE
};
476
/* JTAG transport ops; only queue execution is needed for this adapter. */
static struct jtag_interface xlnx_pcie_xvc_jtag_ops = {
	.execute_queue = &xlnx_pcie_xvc_execute_queue,
};
480
481 static int xlnx_pcie_xvc_swd_sequence(const uint8_t *seq, size_t length)
482 {
483 size_t left, write;
484 uint32_t send;
485 int err;
486
487 left = length;
488 while (left) {
489 write = MIN(XLNX_XVC_MAX_BITS, left);
490 send = buf_get_u32(seq, 0, write);
491 err = xlnx_pcie_xvc_transact(write, send, 0, NULL);
492 if (err != ERROR_OK)
493 return err;
494 left -= write;
495 seq += sizeof(uint32_t);
496 };
497
498 return ERROR_OK;
499 }
500
501 static int xlnx_pcie_xvc_swd_switch_seq(enum swd_special_seq seq)
502 {
503 switch (seq) {
504 case LINE_RESET:
505 LOG_DEBUG("SWD line reset");
506 return xlnx_pcie_xvc_swd_sequence(swd_seq_line_reset,
507 swd_seq_line_reset_len);
508 case JTAG_TO_SWD:
509 LOG_DEBUG("JTAG-to-SWD");
510 return xlnx_pcie_xvc_swd_sequence(swd_seq_jtag_to_swd,
511 swd_seq_jtag_to_swd_len);
512 case SWD_TO_JTAG:
513 LOG_DEBUG("SWD-to-JTAG");
514 return xlnx_pcie_xvc_swd_sequence(swd_seq_swd_to_jtag,
515 swd_seq_swd_to_jtag_len);
516 default:
517 LOG_ERROR("Sequence %d not supported", seq);
518 return ERROR_FAIL;
519 }
520
521 return ERROR_OK;
522 }
523
524 static int queued_retval;
525
526 static void xlnx_pcie_xvc_swd_write_reg(uint8_t cmd, uint32_t value,
527 uint32_t ap_delay_clk);
528
/* Queue a write to DP_ABORT clearing all sticky error/overrun flags,
 * used to recover after a WAIT acknowledge. */
static void swd_clear_sticky_errors(void)
{
	xlnx_pcie_xvc_swd_write_reg(swd_cmd(false, false, DP_ABORT),
		STKCMPCLR | STKERRCLR | WDERRCLR | ORUNERRCLR, 0);
}
534
/* Queue one SWD read transaction.  The outcome is not returned directly:
 * it is latched into queued_retval and reported by the next
 * xlnx_pcie_xvc_swd_run_queue() call.  @ap_delay_clk extra idle cycles are
 * clocked after a successful AP access (AP reads are posted).
 * Bit layout relies on the XVC TMS register driving SWDIO. */
static void xlnx_pcie_xvc_swd_read_reg(uint8_t cmd, uint32_t *value,
				       uint32_t ap_delay_clk)
{
	uint32_t res, ack, rpar;
	int err;

	assert(cmd & SWD_CMD_RnW);

	cmd |= SWD_CMD_START | SWD_CMD_PARK;
	/* cmd + ack: 8 request bits, turnaround, 3 ACK bits in one shift;
	 * MASK_ACK() extracts the ACK field from bits [11:9]. */
	err = xlnx_pcie_xvc_transact(12, cmd, 0, &res);
	if (err != ERROR_OK)
		goto err_out;

	ack = MASK_ACK(res);

	/* read data */
	err = xlnx_pcie_xvc_transact(32, 0, 0, &res);
	if (err != ERROR_OK)
		goto err_out;

	/* parity + trn */
	err = xlnx_pcie_xvc_transact(2, 0, 0, &rpar);
	if (err != ERROR_OK)
		goto err_out;

	LOG_DEBUG("%s %s %s reg %X = %08"PRIx32,
		  ack == SWD_ACK_OK ? "OK" : ack == SWD_ACK_WAIT ?
		  "WAIT" : ack == SWD_ACK_FAULT ? "FAULT" : "JUNK",
		  cmd & SWD_CMD_APnDP ? "AP" : "DP",
		  cmd & SWD_CMD_RnW ? "read" : "write",
		  (cmd & SWD_CMD_A32) >> 1,
		  res);
	switch (ack) {
	case SWD_ACK_OK:
		/* Verify the parity bit captured alongside the data. */
		if (MASK_PAR(rpar) != parity_u32(res)) {
			LOG_DEBUG_IO("Wrong parity detected");
			queued_retval = ERROR_FAIL;
			return;
		}
		if (value)
			*value = res;
		if (cmd & SWD_CMD_APnDP)
			err = xlnx_pcie_xvc_transact(ap_delay_clk, 0, 0, NULL);
		queued_retval = err;
		return;
	case SWD_ACK_WAIT:
		/* Target busy: clear sticky flags; caller's queue run retries. */
		LOG_DEBUG_IO("SWD_ACK_WAIT");
		swd_clear_sticky_errors();
		return;
	case SWD_ACK_FAULT:
		/* NOTE(review): stores the raw (positive) ACK value in
		 * queued_retval instead of an ERROR_* code — confirm callers
		 * only test against ERROR_OK. */
		LOG_DEBUG_IO("SWD_ACK_FAULT");
		queued_retval = ack;
		return;
	default:
		LOG_DEBUG_IO("No valid acknowledge: ack=%02"PRIx32, ack);
		queued_retval = ack;
		return;
	}
err_out:
	queued_retval = err;
}
597
/* Queue one SWD write transaction; mirrors xlnx_pcie_xvc_swd_read_reg but
 * drives the 32 data bits and their parity out after the ACK.  The result
 * is latched into queued_retval for the next run_queue() call. */
static void xlnx_pcie_xvc_swd_write_reg(uint8_t cmd, uint32_t value,
					uint32_t ap_delay_clk)
{
	uint32_t res, ack;
	int err;

	assert(!(cmd & SWD_CMD_RnW));

	cmd |= SWD_CMD_START | SWD_CMD_PARK;
	/* cmd + trn + ack: one extra turnaround cycle vs. the read path. */
	err = xlnx_pcie_xvc_transact(13, cmd, 0, &res);
	if (err != ERROR_OK)
		goto err_out;

	ack = MASK_ACK(res);

	/* write data */
	err = xlnx_pcie_xvc_transact(32, value, 0, NULL);
	if (err != ERROR_OK)
		goto err_out;

	/* parity + trn */
	err = xlnx_pcie_xvc_transact(2, parity_u32(value), 0, NULL);
	if (err != ERROR_OK)
		goto err_out;

	LOG_DEBUG("%s %s %s reg %X = %08"PRIx32,
		  ack == SWD_ACK_OK ? "OK" : ack == SWD_ACK_WAIT ?
		  "WAIT" : ack == SWD_ACK_FAULT ? "FAULT" : "JUNK",
		  cmd & SWD_CMD_APnDP ? "AP" : "DP",
		  cmd & SWD_CMD_RnW ? "read" : "write",
		  (cmd & SWD_CMD_A32) >> 1,
		  value);

	switch (ack) {
	case SWD_ACK_OK:
		/* Posted AP writes need extra idle clocks to complete. */
		if (cmd & SWD_CMD_APnDP)
			err = xlnx_pcie_xvc_transact(ap_delay_clk, 0, 0, NULL);
		queued_retval = err;
		return;
	case SWD_ACK_WAIT:
		LOG_DEBUG_IO("SWD_ACK_WAIT");
		swd_clear_sticky_errors();
		return;
	case SWD_ACK_FAULT:
		/* NOTE(review): raw ACK value stored as the queued result,
		 * not an ERROR_* code — same caveat as the read path. */
		LOG_DEBUG_IO("SWD_ACK_FAULT");
		queued_retval = ack;
		return;
	default:
		LOG_DEBUG_IO("No valid acknowledge: ack=%02"PRIx32, ack);
		queued_retval = ack;
		return;
	}

err_out:
	queued_retval = err;
}
655
656 static int xlnx_pcie_xvc_swd_run_queue(void)
657 {
658 int err;
659
660 /* we want at least 8 idle cycles between each transaction */
661 err = xlnx_pcie_xvc_transact(8, 0, 0, NULL);
662 if (err != ERROR_OK)
663 return err;
664
665 err = queued_retval;
666 queued_retval = ERROR_OK;
667 LOG_DEBUG("SWD queue return value: %02x", err);
668
669 return err;
670 }
671
672 static int xlnx_pcie_xvc_swd_init(void)
673 {
674 return ERROR_OK;
675 }
676
/* SWD transport ops table wired into the adapter driver below. */
static const struct swd_driver xlnx_pcie_xvc_swd_ops = {
	.init = xlnx_pcie_xvc_swd_init,
	.switch_seq = xlnx_pcie_xvc_swd_switch_seq,
	.read_reg = xlnx_pcie_xvc_swd_read_reg,
	.write_reg = xlnx_pcie_xvc_swd_write_reg,
	.run = xlnx_pcie_xvc_swd_run_queue,
};
684
/* Transports selectable via "transport select"; NULL-terminated. */
static const char * const xlnx_pcie_xvc_transports[] = { "jtag", "swd", NULL };
686
/* Top-level adapter registration (referenced from the adapter table). */
struct adapter_driver xlnx_pcie_xvc_adapter_driver = {
	.name = "xlnx_pcie_xvc",
	.transports = xlnx_pcie_xvc_transports,
	.commands = xlnx_pcie_xvc_command_handlers,

	.init = &xlnx_pcie_xvc_init,
	.quit = &xlnx_pcie_xvc_quit,

	.jtag_ops = &xlnx_pcie_xvc_jtag_ops,
	.swd_ops = &xlnx_pcie_xvc_swd_ops,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)