jtag/swd: rename CamelCase macros
[openocd.git] / src / jtag / drivers / xlnx-pcie-xvc.c
1 /* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright (c) 2019 Google, LLC.
4 * Author: Moritz Fischer <moritzf@google.com>
5 */
6
7 #ifdef HAVE_CONFIG_H
8 #include "config.h"
9 #endif
10
11 #include <stdint.h>
12 #include <stdlib.h>
13 #include <math.h>
14 #include <unistd.h>
15 #include <linux/pci.h>
16
17 #include <jtag/interface.h>
18 #include <jtag/swd.h>
19 #include <jtag/commands.h>
20 #include <helper/replacements.h>
21 #include <helper/bits.h>
22
23 /* Available only from kernel v4.10 */
24 #ifndef PCI_CFG_SPACE_EXP_SIZE
25 #define PCI_CFG_SPACE_EXP_SIZE 4096
26 #endif
27
/* Offset of the first PCIe extended capability in config space. */
#define PCIE_EXT_CAP_LST	0x100

/* Register offsets within the XVC vendor-specific capability. */
#define XLNX_XVC_EXT_CAP	0x00	/* extended capability header */
#define XLNX_XVC_VSEC_HDR	0x04	/* vendor-specific header */
#define XLNX_XVC_LEN_REG	0x0C	/* number of bits to shift */
#define XLNX_XVC_TMS_REG	0x10	/* TMS bit vector, one bit per TCK */
#define XLNX_XVC_TDX_REG	0x14	/* TDI vector on write, TDO on read */

#define XLNX_XVC_CAP_SIZE	0x20	/* expected capability length */
#define XLNX_XVC_VSEC_ID	0x8	/* expected vendor-specific header ID */
#define XLNX_XVC_MAX_BITS	0x20	/* max bits per shift transaction (32) */

/* Extract the 3-bit SWD ACK field (bits 9..11 of the 12/13-bit request
 * response) and the parity bit from a raw shift result. */
#define MASK_ACK(x) (((x) >> 9) & 0x7)
#define MASK_PAR(x) ((int)((x) & 0x1))

/* Driver state: sysfs config-space fd, offset of the XVC capability
 * within config space, and the PCI device address string used to build
 * the /sys/bus/pci/devices/<device>/config path. */
struct xlnx_pcie_xvc {
	int fd;
	unsigned offset;
	char *device;
};

static struct xlnx_pcie_xvc xlnx_pcie_xvc_state;
static struct xlnx_pcie_xvc *xlnx_pcie_xvc = &xlnx_pcie_xvc_state;
51
52 static int xlnx_pcie_xvc_read_reg(const int offset, uint32_t *val)
53 {
54 uint32_t res;
55 int err;
56
57 /* Note: This should be ok endianness-wise because by going
58 * through sysfs the kernel does the conversion in the config
59 * space accessor functions
60 */
61 err = pread(xlnx_pcie_xvc->fd, &res, sizeof(res),
62 xlnx_pcie_xvc->offset + offset);
63 if (err != sizeof(res)) {
64 LOG_ERROR("Failed to read offset %x", offset);
65 return ERROR_JTAG_DEVICE_ERROR;
66 }
67
68 if (val)
69 *val = res;
70
71 return ERROR_OK;
72 }
73
74 static int xlnx_pcie_xvc_write_reg(const int offset, const uint32_t val)
75 {
76 int err;
77
78 /* Note: This should be ok endianness-wise because by going
79 * through sysfs the kernel does the conversion in the config
80 * space accessor functions
81 */
82 err = pwrite(xlnx_pcie_xvc->fd, &val, sizeof(val),
83 xlnx_pcie_xvc->offset + offset);
84 if (err != sizeof(val)) {
85 LOG_ERROR("Failed to write offset: %x with value: %" PRIx32,
86 offset, val);
87 return ERROR_JTAG_DEVICE_ERROR;
88 }
89
90 return ERROR_OK;
91 }
92
93 static int xlnx_pcie_xvc_transact(size_t num_bits, uint32_t tms, uint32_t tdi,
94 uint32_t *tdo)
95 {
96 int err;
97
98 err = xlnx_pcie_xvc_write_reg(XLNX_XVC_LEN_REG, num_bits);
99 if (err != ERROR_OK)
100 return err;
101
102 err = xlnx_pcie_xvc_write_reg(XLNX_XVC_TMS_REG, tms);
103 if (err != ERROR_OK)
104 return err;
105
106 err = xlnx_pcie_xvc_write_reg(XLNX_XVC_TDX_REG, tdi);
107 if (err != ERROR_OK)
108 return err;
109
110 err = xlnx_pcie_xvc_read_reg(XLNX_XVC_TDX_REG, tdo);
111 if (err != ERROR_OK)
112 return err;
113
114 if (tdo)
115 LOG_DEBUG_IO("Transact num_bits: %zu, tms: %" PRIx32 ", tdi: %" PRIx32 ", tdo: %" PRIx32,
116 num_bits, tms, tdi, *tdo);
117 else
118 LOG_DEBUG_IO("Transact num_bits: %zu, tms: %" PRIx32 ", tdi: %" PRIx32 ", tdo: <null>",
119 num_bits, tms, tdi);
120 return ERROR_OK;
121 }
122
123 static int xlnx_pcie_xvc_execute_stableclocks(struct jtag_command *cmd)
124 {
125 int tms = tap_get_state() == TAP_RESET ? 1 : 0;
126 size_t left = cmd->cmd.stableclocks->num_cycles;
127 size_t write;
128 int err;
129
130 LOG_DEBUG("stableclocks %i cycles", cmd->cmd.runtest->num_cycles);
131
132 while (left) {
133 write = MIN(XLNX_XVC_MAX_BITS, left);
134 err = xlnx_pcie_xvc_transact(write, tms, 0, NULL);
135 if (err != ERROR_OK)
136 return err;
137 left -= write;
138 };
139
140 return ERROR_OK;
141 }
142
143 static int xlnx_pcie_xvc_execute_statemove(size_t skip)
144 {
145 uint8_t tms_scan = tap_get_tms_path(tap_get_state(),
146 tap_get_end_state());
147 int tms_count = tap_get_tms_path_len(tap_get_state(),
148 tap_get_end_state());
149 int err;
150
151 LOG_DEBUG("statemove starting at (skip: %zu) %s end in %s", skip,
152 tap_state_name(tap_get_state()),
153 tap_state_name(tap_get_end_state()));
154
155
156 err = xlnx_pcie_xvc_transact(tms_count - skip, tms_scan >> skip, 0, NULL);
157 if (err != ERROR_OK)
158 return err;
159
160 tap_set_state(tap_get_end_state());
161
162 return ERROR_OK;
163 }
164
165 static int xlnx_pcie_xvc_execute_runtest(struct jtag_command *cmd)
166 {
167 int err = ERROR_OK;
168
169 LOG_DEBUG("runtest %i cycles, end in %i",
170 cmd->cmd.runtest->num_cycles,
171 cmd->cmd.runtest->end_state);
172
173 tap_state_t tmp_state = tap_get_end_state();
174
175 if (tap_get_state() != TAP_IDLE) {
176 tap_set_end_state(TAP_IDLE);
177 err = xlnx_pcie_xvc_execute_statemove(0);
178 if (err != ERROR_OK)
179 return err;
180 };
181
182 size_t left = cmd->cmd.runtest->num_cycles;
183 size_t write;
184
185 while (left) {
186 write = MIN(XLNX_XVC_MAX_BITS, left);
187 err = xlnx_pcie_xvc_transact(write, 0, 0, NULL);
188 if (err != ERROR_OK)
189 return err;
190 left -= write;
191 };
192
193 tap_set_end_state(tmp_state);
194 if (tap_get_state() != tap_get_end_state())
195 err = xlnx_pcie_xvc_execute_statemove(0);
196
197 return err;
198 }
199
200 static int xlnx_pcie_xvc_execute_pathmove(struct jtag_command *cmd)
201 {
202 size_t num_states = cmd->cmd.pathmove->num_states;
203 tap_state_t *path = cmd->cmd.pathmove->path;
204 int err = ERROR_OK;
205 size_t i;
206
207 LOG_DEBUG("pathmove: %i states, end in %i",
208 cmd->cmd.pathmove->num_states,
209 cmd->cmd.pathmove->path[cmd->cmd.pathmove->num_states - 1]);
210
211 for (i = 0; i < num_states; i++) {
212 if (path[i] == tap_state_transition(tap_get_state(), false)) {
213 err = xlnx_pcie_xvc_transact(1, 1, 0, NULL);
214 } else if (path[i] == tap_state_transition(tap_get_state(), true)) {
215 err = xlnx_pcie_xvc_transact(1, 0, 0, NULL);
216 } else {
217 LOG_ERROR("BUG: %s -> %s isn't a valid TAP transition.",
218 tap_state_name(tap_get_state()),
219 tap_state_name(path[i]));
220 err = ERROR_JTAG_QUEUE_FAILED;
221 }
222 if (err != ERROR_OK)
223 return err;
224 tap_set_state(path[i]);
225 }
226
227 tap_set_end_state(tap_get_state());
228
229 return ERROR_OK;
230 }
231
/* Execute an IR or DR scan: move to the matching shift state, shift the
 * scan buffer through TDI/TDO in 32-bit chunks (raising TMS on the very
 * last bit to leave the shift state), then move to the requested end
 * state. Returns ERROR_OK or a JTAG error code; the scan buffer is freed
 * on all paths. */
static int xlnx_pcie_xvc_execute_scan(struct jtag_command *cmd)
{
	enum scan_type type = jtag_scan_type(cmd->cmd.scan);
	tap_state_t saved_end_state = cmd->cmd.scan->end_state;
	bool ir_scan = cmd->cmd.scan->ir_scan;
	uint32_t tdi, tms, tdo;
	uint8_t *buf, *rd_ptr;
	int err, scan_size;
	size_t write;
	size_t left;

	scan_size = jtag_build_buffer(cmd->cmd.scan, &buf);
	rd_ptr = buf;
	LOG_DEBUG("%s scan type %d %d bits; starts in %s end in %s",
		  (cmd->cmd.scan->ir_scan) ? "IR" : "DR", type, scan_size,
		  tap_state_name(tap_get_state()),
		  tap_state_name(cmd->cmd.scan->end_state));

	/* If we're in TAP_DR_SHIFT state but need to do a IR_SCAN or
	 * vice-versa, do a statemove to corresponding other state, then restore
	 * end state */
	if (ir_scan && tap_get_state() != TAP_IRSHIFT) {
		tap_set_end_state(TAP_IRSHIFT);
		err = xlnx_pcie_xvc_execute_statemove(0);
		if (err != ERROR_OK)
			goto out_err;
		tap_set_end_state(saved_end_state);
	} else if (!ir_scan && (tap_get_state() != TAP_DRSHIFT)) {
		tap_set_end_state(TAP_DRSHIFT);
		err = xlnx_pcie_xvc_execute_statemove(0);
		if (err != ERROR_OK)
			goto out_err;
		tap_set_end_state(saved_end_state);
	}

	left = scan_size;
	while (left) {
		write = MIN(XLNX_XVC_MAX_BITS, left);
		/* the last TMS should be a 1, to leave the state */
		tms = left <= XLNX_XVC_MAX_BITS ? BIT(write - 1) : 0;
		/* For SCAN_IN we only capture TDO; for SCAN_OUT we only drive TDI. */
		tdi = (type != SCAN_IN) ? buf_get_u32(rd_ptr, 0, write) : 0;
		err = xlnx_pcie_xvc_transact(write, tms, tdi, type != SCAN_OUT ?
					     &tdo : NULL);
		if (err != ERROR_OK)
			goto out_err;
		left -= write;
		if (type != SCAN_OUT)
			buf_set_u32(rd_ptr, 0, write, tdo);
		rd_ptr += sizeof(uint32_t);
	};

	err = jtag_read_buffer(buf, cmd->cmd.scan);
	free(buf);

	/* The TMS=1 clocked on the final shifted bit already consumed the
	 * first transition of the path to the end state, hence skip = 1. */
	if (tap_get_state() != tap_get_end_state())
		err = xlnx_pcie_xvc_execute_statemove(1);

	return err;

out_err:
	free(buf);
	return err;
}
296
/* Reset requests are only logged; this driver performs no TRST/SRST
 * action here. */
static void xlnx_pcie_xvc_execute_reset(struct jtag_command *cmd)
{
	LOG_DEBUG("reset trst: %i srst: %i", cmd->cmd.reset->trst,
		  cmd->cmd.reset->srst);
}
302
303 static void xlnx_pcie_xvc_execute_sleep(struct jtag_command *cmd)
304 {
305 LOG_DEBUG("sleep %" PRIu32 "", cmd->cmd.sleep->us);
306 usleep(cmd->cmd.sleep->us);
307 }
308
309 static int xlnx_pcie_xvc_execute_tms(struct jtag_command *cmd)
310 {
311 const size_t num_bits = cmd->cmd.tms->num_bits;
312 const uint8_t *bits = cmd->cmd.tms->bits;
313 size_t left, write;
314 uint32_t tms;
315 int err;
316
317 LOG_DEBUG("execute tms %zu", num_bits);
318
319 left = num_bits;
320 while (left) {
321 write = MIN(XLNX_XVC_MAX_BITS, left);
322 tms = buf_get_u32(bits, 0, write);
323 err = xlnx_pcie_xvc_transact(write, tms, 0, NULL);
324 if (err != ERROR_OK)
325 return err;
326 left -= write;
327 bits += 4;
328 };
329
330 return ERROR_OK;
331 }
332
333 static int xlnx_pcie_xvc_execute_command(struct jtag_command *cmd)
334 {
335 LOG_DEBUG("%s: cmd->type: %u", __func__, cmd->type);
336 switch (cmd->type) {
337 case JTAG_STABLECLOCKS:
338 return xlnx_pcie_xvc_execute_stableclocks(cmd);
339 case JTAG_RUNTEST:
340 return xlnx_pcie_xvc_execute_runtest(cmd);
341 case JTAG_TLR_RESET:
342 tap_set_end_state(cmd->cmd.statemove->end_state);
343 return xlnx_pcie_xvc_execute_statemove(0);
344 case JTAG_PATHMOVE:
345 return xlnx_pcie_xvc_execute_pathmove(cmd);
346 case JTAG_SCAN:
347 return xlnx_pcie_xvc_execute_scan(cmd);
348 case JTAG_RESET:
349 xlnx_pcie_xvc_execute_reset(cmd);
350 break;
351 case JTAG_SLEEP:
352 xlnx_pcie_xvc_execute_sleep(cmd);
353 break;
354 case JTAG_TMS:
355 return xlnx_pcie_xvc_execute_tms(cmd);
356 default:
357 LOG_ERROR("BUG: Unknown JTAG command type encountered.");
358 return ERROR_JTAG_QUEUE_FAILED;
359 }
360
361 return ERROR_OK;
362 }
363
364 static int xlnx_pcie_xvc_execute_queue(void)
365 {
366 struct jtag_command *cmd = jtag_command_queue;
367 int ret;
368
369 while (cmd) {
370 ret = xlnx_pcie_xvc_execute_command(cmd);
371
372 if (ret != ERROR_OK)
373 return ret;
374
375 cmd = cmd->next;
376 }
377
378 return ERROR_OK;
379 }
380
381
382 static int xlnx_pcie_xvc_init(void)
383 {
384 char filename[PATH_MAX];
385 uint32_t cap, vh;
386 int err;
387
388 snprintf(filename, PATH_MAX, "/sys/bus/pci/devices/%s/config",
389 xlnx_pcie_xvc->device);
390 xlnx_pcie_xvc->fd = open(filename, O_RDWR | O_SYNC);
391 if (xlnx_pcie_xvc->fd < 0) {
392 LOG_ERROR("Failed to open device: %s", filename);
393 return ERROR_JTAG_INIT_FAILED;
394 }
395
396 LOG_INFO("Scanning PCIe device %s's for Xilinx XVC/PCIe ...",
397 xlnx_pcie_xvc->device);
398 /* Parse the PCIe extended capability list and try to find
399 * vendor specific header */
400 xlnx_pcie_xvc->offset = PCIE_EXT_CAP_LST;
401 while (xlnx_pcie_xvc->offset <= PCI_CFG_SPACE_EXP_SIZE - sizeof(cap) &&
402 xlnx_pcie_xvc->offset >= PCIE_EXT_CAP_LST) {
403 err = xlnx_pcie_xvc_read_reg(XLNX_XVC_EXT_CAP, &cap);
404 if (err != ERROR_OK)
405 return err;
406 LOG_DEBUG("Checking capability at 0x%x; id=0x%04" PRIx32 " version=0x%" PRIx32 " next=0x%" PRIx32,
407 xlnx_pcie_xvc->offset,
408 PCI_EXT_CAP_ID(cap),
409 PCI_EXT_CAP_VER(cap),
410 PCI_EXT_CAP_NEXT(cap));
411 if (PCI_EXT_CAP_ID(cap) == PCI_EXT_CAP_ID_VNDR) {
412 err = xlnx_pcie_xvc_read_reg(XLNX_XVC_VSEC_HDR, &vh);
413 if (err != ERROR_OK)
414 return err;
415 LOG_DEBUG("Checking possible match at 0x%x; id: 0x%" PRIx32 "; rev: 0x%" PRIx32 "; length: 0x%" PRIx32,
416 xlnx_pcie_xvc->offset,
417 PCI_VNDR_HEADER_ID(vh),
418 PCI_VNDR_HEADER_REV(vh),
419 PCI_VNDR_HEADER_LEN(vh));
420 if ((PCI_VNDR_HEADER_ID(vh) == XLNX_XVC_VSEC_ID) &&
421 (PCI_VNDR_HEADER_LEN(vh) == XLNX_XVC_CAP_SIZE))
422 break;
423 }
424 xlnx_pcie_xvc->offset = PCI_EXT_CAP_NEXT(cap);
425 }
426 if ((xlnx_pcie_xvc->offset > PCI_CFG_SPACE_EXP_SIZE - XLNX_XVC_CAP_SIZE) ||
427 xlnx_pcie_xvc->offset < PCIE_EXT_CAP_LST) {
428 close(xlnx_pcie_xvc->fd);
429 return ERROR_JTAG_INIT_FAILED;
430 }
431
432 LOG_INFO("Found Xilinx XVC/PCIe capability at offset: 0x%x", xlnx_pcie_xvc->offset);
433
434 return ERROR_OK;
435 }
436
437 static int xlnx_pcie_xvc_quit(void)
438 {
439 int err;
440
441 err = close(xlnx_pcie_xvc->fd);
442 if (err)
443 return err;
444
445 return ERROR_OK;
446 }
447
448 COMMAND_HANDLER(xlnx_pcie_xvc_handle_config_command)
449 {
450 if (CMD_ARGC < 1)
451 return ERROR_COMMAND_SYNTAX_ERROR;
452
453 /* we can't really free this in a safe manner, so at least
454 * limit the memory we're leaking by freeing the old one first
455 * before allocating a new one ...
456 */
457 free(xlnx_pcie_xvc->device);
458
459 xlnx_pcie_xvc->device = strdup(CMD_ARGV[0]);
460 return ERROR_OK;
461 }
462
/* Configuration commands registered for this adapter. */
static const struct command_registration xlnx_pcie_xvc_command_handlers[] = {
	{
		.name = "xlnx_pcie_xvc_config",
		.handler = xlnx_pcie_xvc_handle_config_command,
		.mode = COMMAND_CONFIG,
		.help = "Configure XVC/PCIe JTAG adapter",
		.usage = "device",
	},
	COMMAND_REGISTRATION_DONE
};
473
/* JTAG transport operations: everything is funneled through the queued
 * command executor; no other jtag_interface hooks are provided. */
static struct jtag_interface xlnx_pcie_xvc_jtag_ops = {
	.execute_queue = &xlnx_pcie_xvc_execute_queue,
};
477
478 static int xlnx_pcie_xvc_swd_sequence(const uint8_t *seq, size_t length)
479 {
480 size_t left, write;
481 uint32_t send;
482 int err;
483
484 left = length;
485 while (left) {
486 write = MIN(XLNX_XVC_MAX_BITS, left);
487 send = buf_get_u32(seq, 0, write);
488 err = xlnx_pcie_xvc_transact(write, send, 0, NULL);
489 if (err != ERROR_OK)
490 return err;
491 left -= write;
492 seq += sizeof(uint32_t);
493 };
494
495 return ERROR_OK;
496 }
497
498 static int xlnx_pcie_xvc_swd_switch_seq(enum swd_special_seq seq)
499 {
500 switch (seq) {
501 case LINE_RESET:
502 LOG_DEBUG("SWD line reset");
503 return xlnx_pcie_xvc_swd_sequence(swd_seq_line_reset,
504 swd_seq_line_reset_len);
505 case JTAG_TO_SWD:
506 LOG_DEBUG("JTAG-to-SWD");
507 return xlnx_pcie_xvc_swd_sequence(swd_seq_jtag_to_swd,
508 swd_seq_jtag_to_swd_len);
509 case SWD_TO_JTAG:
510 LOG_DEBUG("SWD-to-JTAG");
511 return xlnx_pcie_xvc_swd_sequence(swd_seq_swd_to_jtag,
512 swd_seq_swd_to_jtag_len);
513 default:
514 LOG_ERROR("Sequence %d not supported", seq);
515 return ERROR_FAIL;
516 }
517
518 return ERROR_OK;
519 }
520
/* Result of the most recently queued SWD operation; collected and reset
 * by xlnx_pcie_xvc_swd_run_queue(). */
static int queued_retval;

/* Forward declaration: clearing sticky errors reuses the write path. */
static void xlnx_pcie_xvc_swd_write_reg(uint8_t cmd, uint32_t value,
					uint32_t ap_delay_clk);

/* Write DP ABORT to clear all sticky error/compare flags; invoked after a
 * WAIT acknowledge so the next transaction can proceed. */
static void swd_clear_sticky_errors(void)
{
	xlnx_pcie_xvc_swd_write_reg(swd_cmd(false, false, DP_ABORT),
		STKCMPCLR | STKERRCLR | WDERRCLR | ORUNERRCLR, 0);
}
531
/* Queue an SWD read of an AP/DP register.
 * @cmd: SWD request byte (SWD_CMD_RNW must be set); START/PARK are OR'd
 *       in here before transmission.
 * @value: out parameter for the 32-bit register value (may be NULL).
 * @ap_delay_clk: idle clocks run after a successful AP read.
 * Errors are not returned: they are latched into queued_retval and
 * reported by xlnx_pcie_xvc_swd_run_queue(). */
static void xlnx_pcie_xvc_swd_read_reg(uint8_t cmd, uint32_t *value,
				       uint32_t ap_delay_clk)
{
	uint32_t res, ack, rpar;
	int err;

	assert(cmd & SWD_CMD_RNW);

	cmd |= SWD_CMD_START | SWD_CMD_PARK;
	/* cmd + ack: 12 bits — presumably 8-bit request, turnaround, 3-bit
	 * ACK (MASK_ACK picks bits 9..11) — matches the SWD wire protocol */
	err = xlnx_pcie_xvc_transact(12, cmd, 0, &res);
	if (err != ERROR_OK)
		goto err_out;

	ack = MASK_ACK(res);

	/* read data */
	err = xlnx_pcie_xvc_transact(32, 0, 0, &res);
	if (err != ERROR_OK)
		goto err_out;

	/* parity + trn */
	err = xlnx_pcie_xvc_transact(2, 0, 0, &rpar);
	if (err != ERROR_OK)
		goto err_out;

	LOG_DEBUG("%s %s %s reg %X = %08"PRIx32,
		  ack == SWD_ACK_OK ? "OK" : ack == SWD_ACK_WAIT ?
		  "WAIT" : ack == SWD_ACK_FAULT ? "FAULT" : "JUNK",
		  cmd & SWD_CMD_APNDP ? "AP" : "DP",
		  cmd & SWD_CMD_RNW ? "read" : "write",
		  (cmd & SWD_CMD_A32) >> 1,
		  res);
	switch (ack) {
	case SWD_ACK_OK:
		/* Reject data whose parity bit doesn't match the payload. */
		if (MASK_PAR(rpar) != parity_u32(res)) {
			LOG_DEBUG_IO("Wrong parity detected");
			queued_retval = ERROR_FAIL;
			return;
		}
		if (value)
			*value = res;
		if (cmd & SWD_CMD_APNDP)
			err = xlnx_pcie_xvc_transact(ap_delay_clk, 0, 0, NULL);
		queued_retval = err;
		return;
	case SWD_ACK_WAIT:
		LOG_DEBUG_IO("SWD_ACK_WAIT");
		swd_clear_sticky_errors();
		return;
	case SWD_ACK_FAULT:
		LOG_DEBUG_IO("SWD_ACK_FAULT");
		/* NOTE(review): the raw ACK value (not an ERROR_* code) is
		 * stored as the queued result here — confirm callers of
		 * run_queue tolerate that. */
		queued_retval = ack;
		return;
	default:
		LOG_DEBUG_IO("No valid acknowledge: ack=%02"PRIx32, ack);
		queued_retval = ack;
		return;
	}
err_out:
	queued_retval = err;
}
594
/* Queue an SWD write of an AP/DP register.
 * @cmd: SWD request byte (SWD_CMD_RNW must be clear); START/PARK are OR'd
 *       in here before transmission.
 * @value: 32-bit value to write.
 * @ap_delay_clk: idle clocks run after a successful AP write.
 * Errors are latched into queued_retval and reported by
 * xlnx_pcie_xvc_swd_run_queue(). */
static void xlnx_pcie_xvc_swd_write_reg(uint8_t cmd, uint32_t value,
					uint32_t ap_delay_clk)
{
	uint32_t res, ack;
	int err;

	assert(!(cmd & SWD_CMD_RNW));

	cmd |= SWD_CMD_START | SWD_CMD_PARK;
	/* cmd + trn + ack: 13 bits — presumably 8-bit request, turnaround,
	 * 3-bit ACK, second turnaround before the data phase */
	err = xlnx_pcie_xvc_transact(13, cmd, 0, &res);
	if (err != ERROR_OK)
		goto err_out;

	ack = MASK_ACK(res);

	/* write data */
	err = xlnx_pcie_xvc_transact(32, value, 0, NULL);
	if (err != ERROR_OK)
		goto err_out;

	/* parity + trn */
	err = xlnx_pcie_xvc_transact(2, parity_u32(value), 0, NULL);
	if (err != ERROR_OK)
		goto err_out;

	LOG_DEBUG("%s %s %s reg %X = %08"PRIx32,
		  ack == SWD_ACK_OK ? "OK" : ack == SWD_ACK_WAIT ?
		  "WAIT" : ack == SWD_ACK_FAULT ? "FAULT" : "JUNK",
		  cmd & SWD_CMD_APNDP ? "AP" : "DP",
		  cmd & SWD_CMD_RNW ? "read" : "write",
		  (cmd & SWD_CMD_A32) >> 1,
		  value);

	switch (ack) {
	case SWD_ACK_OK:
		if (cmd & SWD_CMD_APNDP)
			err = xlnx_pcie_xvc_transact(ap_delay_clk, 0, 0, NULL);
		queued_retval = err;
		return;
	case SWD_ACK_WAIT:
		LOG_DEBUG_IO("SWD_ACK_WAIT");
		swd_clear_sticky_errors();
		return;
	case SWD_ACK_FAULT:
		LOG_DEBUG_IO("SWD_ACK_FAULT");
		/* NOTE(review): raw ACK value stored as the queued result —
		 * same caveat as in the read path. */
		queued_retval = ack;
		return;
	default:
		LOG_DEBUG_IO("No valid acknowledge: ack=%02"PRIx32, ack);
		queued_retval = ack;
		return;
	}

err_out:
	queued_retval = err;
}
652
653 static int xlnx_pcie_xvc_swd_run_queue(void)
654 {
655 int err;
656
657 /* we want at least 8 idle cycles between each transaction */
658 err = xlnx_pcie_xvc_transact(8, 0, 0, NULL);
659 if (err != ERROR_OK)
660 return err;
661
662 err = queued_retval;
663 queued_retval = ERROR_OK;
664 LOG_DEBUG("SWD queue return value: %02x", err);
665
666 return err;
667 }
668
/* SWD needs no setup beyond the shared adapter init; always succeeds. */
static int xlnx_pcie_xvc_swd_init(void)
{
	return ERROR_OK;
}
673
/* SWD transport operations, implemented on top of the XVC shift engine. */
static const struct swd_driver xlnx_pcie_xvc_swd_ops = {
	.init = xlnx_pcie_xvc_swd_init,
	.switch_seq = xlnx_pcie_xvc_swd_switch_seq,
	.read_reg = xlnx_pcie_xvc_swd_read_reg,
	.write_reg = xlnx_pcie_xvc_swd_write_reg,
	.run = xlnx_pcie_xvc_swd_run_queue,
};
681
/* Transports selectable for this adapter. */
static const char * const xlnx_pcie_xvc_transports[] = { "jtag", "swd", NULL };

/* Adapter registration: ties together init/quit, the config command, and
 * the JTAG/SWD operation tables. */
struct adapter_driver xlnx_pcie_xvc_adapter_driver = {
	.name = "xlnx_pcie_xvc",
	.transports = xlnx_pcie_xvc_transports,
	.commands = xlnx_pcie_xvc_command_handlers,

	.init = &xlnx_pcie_xvc_init,
	.quit = &xlnx_pcie_xvc_quit,

	.jtag_ops = &xlnx_pcie_xvc_jtag_ops,
	.swd_ops = &xlnx_pcie_xvc_swd_ops,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account and then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)