1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
19 ***************************************************************************/
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "replacements.h"
25
26 #include "xscale.h"
27
28 #include "register.h"
29 #include "target.h"
30 #include "armv4_5.h"
31 #include "arm_simulator.h"
32 #include "arm_disassembler.h"
33 #include "log.h"
34 #include "jtag.h"
35 #include "binarybuffer.h"
36 #include "time_support.h"
37 #include "breakpoints.h"
38 #include "fileio.h"
39
40 #include <stdlib.h>
41 #include <string.h>
42
43 #include <sys/types.h>
44 #include <unistd.h>
45 #include <errno.h>
46
47
48 /* cli handling */
49 int xscale_register_commands(struct command_context_s *cmd_ctx);
50
51 /* forward declarations */
52 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target);
53 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target);
54 int xscale_quit();
55
56 int xscale_arch_state(struct target_s *target);
57 int xscale_poll(target_t *target);
58 int xscale_halt(target_t *target);
59 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution);
60 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints);
61 int xscale_debug_entry(target_t *target);
62 int xscale_restore_context(target_t *target);
63
64 int xscale_assert_reset(target_t *target);
65 int xscale_deassert_reset(target_t *target);
66 int xscale_soft_reset_halt(struct target_s *target);
67 int xscale_prepare_reset_halt(struct target_s *target);
68
69 int xscale_set_reg_u32(reg_t *reg, u32 value);
70
71 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode);
72 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value);
73
74 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
75 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
76 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer);
77 int xscale_checksum_memory(struct target_s *target, u32 address, u32 count, u32* checksum);
78
79 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
80 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
81 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
82 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
83 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
84 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
85 void xscale_enable_watchpoints(struct target_s *target);
86 void xscale_enable_breakpoints(struct target_s *target);
87 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical);
88 static int xscale_mmu(struct target_s *target, int *enabled);
89
90 int xscale_read_trace(target_t *target);
91
92 target_type_t xscale_target =
93 {
94 .name = "xscale",
95
96 .poll = xscale_poll,
97 .arch_state = xscale_arch_state,
98
99 .target_request_data = NULL,
100
101 .halt = xscale_halt,
102 .resume = xscale_resume,
103 .step = xscale_step,
104
105 .assert_reset = xscale_assert_reset,
106 .deassert_reset = xscale_deassert_reset,
107 .soft_reset_halt = xscale_soft_reset_halt,
108 .prepare_reset_halt = xscale_prepare_reset_halt,
109
110 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
111
112 .read_memory = xscale_read_memory,
113 .write_memory = xscale_write_memory,
114 .bulk_write_memory = xscale_bulk_write_memory,
115 .checksum_memory = xscale_checksum_memory,
116
117 .run_algorithm = armv4_5_run_algorithm,
118
119 .add_breakpoint = xscale_add_breakpoint,
120 .remove_breakpoint = xscale_remove_breakpoint,
121 .add_watchpoint = xscale_add_watchpoint,
122 .remove_watchpoint = xscale_remove_watchpoint,
123
124 .register_commands = xscale_register_commands,
125 .target_command = xscale_target_command,
126 .init_target = xscale_init_target,
127 .quit = xscale_quit,
128
129 .virt2phys = xscale_virt2phys,
130 .mmu = xscale_mmu
131 };
132
133 char* xscale_reg_list[] =
134 {
135 "XSCALE_MAINID", /* 0 */
136 "XSCALE_CACHETYPE",
137 "XSCALE_CTRL",
138 "XSCALE_AUXCTRL",
139 "XSCALE_TTB",
140 "XSCALE_DAC",
141 "XSCALE_FSR",
142 "XSCALE_FAR",
143 "XSCALE_PID",
144 "XSCALE_CPACCESS",
145 "XSCALE_IBCR0", /* 10 */
146 "XSCALE_IBCR1",
147 "XSCALE_DBR0",
148 "XSCALE_DBR1",
149 "XSCALE_DBCON",
150 "XSCALE_TBREG",
151 "XSCALE_CHKPT0",
152 "XSCALE_CHKPT1",
153 "XSCALE_DCSR",
154 "XSCALE_TX",
155 "XSCALE_RX", /* 20 */
156 "XSCALE_TXRXCTRL",
157 };
158
159 xscale_reg_t xscale_reg_arch_info[] =
160 {
161 {XSCALE_MAINID, NULL},
162 {XSCALE_CACHETYPE, NULL},
163 {XSCALE_CTRL, NULL},
164 {XSCALE_AUXCTRL, NULL},
165 {XSCALE_TTB, NULL},
166 {XSCALE_DAC, NULL},
167 {XSCALE_FSR, NULL},
168 {XSCALE_FAR, NULL},
169 {XSCALE_PID, NULL},
170 {XSCALE_CPACCESS, NULL},
171 {XSCALE_IBCR0, NULL},
172 {XSCALE_IBCR1, NULL},
173 {XSCALE_DBR0, NULL},
174 {XSCALE_DBR1, NULL},
175 {XSCALE_DBCON, NULL},
176 {XSCALE_TBREG, NULL},
177 {XSCALE_CHKPT0, NULL},
178 {XSCALE_CHKPT1, NULL},
179 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
180 {-1, NULL}, /* TX accessed via JTAG */
181 {-1, NULL}, /* RX accessed via JTAG */
182 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
183 };
184
185 int xscale_reg_arch_type = -1;
186
187 int xscale_get_reg(reg_t *reg);
188 int xscale_set_reg(reg_t *reg, u8 *buf);
189
190 int xscale_get_arch_pointers(target_t *target, armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
191 {
192 armv4_5_common_t *armv4_5 = target->arch_info;
193 xscale_common_t *xscale = armv4_5->arch_info;
194
195 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
196 {
197 LOG_ERROR("target isn't an XScale target");
198 return -1;
199 }
200
201 if (xscale->common_magic != XSCALE_COMMON_MAGIC)
202 {
203 LOG_ERROR("target isn't an XScale target");
204 return -1;
205 }
206
207 *armv4_5_p = armv4_5;
208 *xscale_p = xscale;
209
210 return ERROR_OK;
211 }
212
213 int xscale_jtag_set_instr(int chain_pos, u32 new_instr)
214 {
215 jtag_device_t *device = jtag_get_device(chain_pos);
216
217 if (buf_get_u32(device->cur_instr, 0, device->ir_length) != new_instr)
218 {
219 scan_field_t field;
220
221 field.device = chain_pos;
222 field.num_bits = device->ir_length;
223 field.out_value = calloc(CEIL(field.num_bits, 8), 1);
224 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
225 field.out_mask = NULL;
226 field.in_value = NULL;
227 jtag_set_check_value(&field, device->expected, device->expected_mask, NULL);
228
229 jtag_add_ir_scan(1, &field, -1);
230
231 free(field.out_value);
232 }
233
234 return ERROR_OK;
235 }
236
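/* Note on the debug-register DR scans used below (DCSR, DBGTX, DBGRX): each scan is
 * built from three fields - a 3-bit handshake/status field, the 32-bit data word, and a
 * trailing 1-bit field.  Bit 0 of the handshake field is polled by the read/write
 * routines to tell whether the data word was actually valid/accepted.
 */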
237 int xscale_read_dcsr(target_t *target)
238 {
239 armv4_5_common_t *armv4_5 = target->arch_info;
240 xscale_common_t *xscale = armv4_5->arch_info;
241
242 int retval;
243
244 scan_field_t fields[3];
245 u8 field0 = 0x0;
246 u8 field0_check_value = 0x2;
247 u8 field0_check_mask = 0x7;
248 u8 field2 = 0x0;
249 u8 field2_check_value = 0x0;
250 u8 field2_check_mask = 0x1;
251
252 jtag_add_end_state(TAP_PD);
253 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
254
255 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
256 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
257
258 fields[0].device = xscale->jtag_info.chain_pos;
259 fields[0].num_bits = 3;
260 fields[0].out_value = &field0;
261 fields[0].out_mask = NULL;
262 fields[0].in_value = NULL;
263 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
264
265 fields[1].device = xscale->jtag_info.chain_pos;
266 fields[1].num_bits = 32;
267 fields[1].out_value = NULL;
268 fields[1].out_mask = NULL;
269 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
270 fields[1].in_handler = NULL;
271 fields[1].in_handler_priv = NULL;
272 fields[1].in_check_value = NULL;
273 fields[1].in_check_mask = NULL;
274
275 fields[2].device = xscale->jtag_info.chain_pos;
276 fields[2].num_bits = 1;
277 fields[2].out_value = &field2;
278 fields[2].out_mask = NULL;
279 fields[2].in_value = NULL;
280 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
281
282 jtag_add_dr_scan(3, fields, -1);
283
284 if ((retval = jtag_execute_queue()) != ERROR_OK)
285 {
286 LOG_ERROR("JTAG error while reading DCSR");
287 return retval;
288 }
289
290 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
291 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
292
293 /* write the register with the value we just read
294 	 * (on this second pass, only the first bit of field0 is guaranteed to be 0)
295 */
296 field0_check_mask = 0x1;
297 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
298 fields[1].in_value = NULL;
299
300 jtag_add_end_state(TAP_RTI);
301
302 jtag_add_dr_scan(3, fields, -1);
303
304 /* DANGER!!! this must be here. It will make sure that the arguments
305 	 * to jtag_set_check_value() do not go out of scope! */
306 return jtag_execute_queue();
307 }
308
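/* Collect num_words 32-bit words from the debug handler via the DBGTX data register.
 * Reads are pipelined: one scan is queued per outstanding word, and any word whose
 * ready bit (bit 0 of the 3-bit status field) came back clear is compacted out of the
 * result arrays and re-read on the next pass, until everything has arrived or too many
 * empty passes have occurred.
 */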
309 int xscale_receive(target_t *target, u32 *buffer, int num_words)
310 {
311 if (num_words==0)
312 return ERROR_INVALID_ARGUMENTS;
313
314 int retval=ERROR_OK;
315 armv4_5_common_t *armv4_5 = target->arch_info;
316 xscale_common_t *xscale = armv4_5->arch_info;
317
318 enum tap_state path[3];
319 scan_field_t fields[3];
320
321 u8 *field0 = malloc(num_words * 1);
322 u8 field0_check_value = 0x2;
323 u8 field0_check_mask = 0x6;
324 u32 *field1 = malloc(num_words * 4);
325 u8 field2_check_value = 0x0;
326 u8 field2_check_mask = 0x1;
327 int words_done = 0;
328 int words_scheduled = 0;
329
330 int i;
331
332 path[0] = TAP_SDS;
333 path[1] = TAP_CD;
334 path[2] = TAP_SD;
335
336 fields[0].device = xscale->jtag_info.chain_pos;
337 fields[0].num_bits = 3;
338 fields[0].out_value = NULL;
339 fields[0].out_mask = NULL;
340 fields[0].in_value = NULL;
341 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
342
343 fields[1].device = xscale->jtag_info.chain_pos;
344 fields[1].num_bits = 32;
345 fields[1].out_value = NULL;
346 fields[1].out_mask = NULL;
347 fields[1].in_value = NULL;
348 fields[1].in_handler = NULL;
349 fields[1].in_handler_priv = NULL;
350 fields[1].in_check_value = NULL;
351 fields[1].in_check_mask = NULL;
352
353
354
355 fields[2].device = xscale->jtag_info.chain_pos;
356 fields[2].num_bits = 1;
357 fields[2].out_value = NULL;
358 fields[2].out_mask = NULL;
359 fields[2].in_value = NULL;
360 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
361
362 jtag_add_end_state(TAP_RTI);
363 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
364 jtag_add_runtest(1, -1); /* ensures that we're in the TAP_RTI state as the above could be a no-op */
365
366 /* repeat until all words have been collected */
367 int attempts=0;
368 while (words_done < num_words)
369 {
370 /* schedule reads */
371 words_scheduled = 0;
372 for (i = words_done; i < num_words; i++)
373 {
374 fields[0].in_value = &field0[i];
375 fields[1].in_handler = buf_to_u32_handler;
376 fields[1].in_handler_priv = (u8*)&field1[i];
377
378 jtag_add_pathmove(3, path);
379 jtag_add_dr_scan(3, fields, TAP_RTI);
380 words_scheduled++;
381 }
382
383 if ((retval = jtag_execute_queue()) != ERROR_OK)
384 {
385 LOG_ERROR("JTAG error while receiving data from debug handler");
386 break;
387 }
388
389 /* examine results */
390 for (i = words_done; i < num_words; i++)
391 {
392 			if (!(field0[i] & 1))
393 			{
394 				/* word not ready yet: shift the remaining results down so it is re-read on the next pass */
395 int j;
396 for (j = i; j < num_words - 1; j++)
397 {
398 field0[j] = field0[j+1];
399 field1[j] = field1[j+1];
400 }
401 words_scheduled--;
402 }
403 }
404 if (words_scheduled==0)
405 {
406 if (attempts++==1000)
407 {
408 				LOG_ERROR("Failed to receive data from the debug handler after 1000 attempts");
409 retval=ERROR_TARGET_TIMEOUT;
410 break;
411 }
412 }
413
414 words_done += words_scheduled;
415 }
416
417 for (i = 0; i < num_words; i++)
418 *(buffer++) = buf_get_u32((u8*)&field1[i], 0, 32);
419
420 	free(field0);
421 	free(field1);
422 return retval;
423 }
424
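/* Read the TX register once.  With consume != 0 the scan goes straight from Capture-DR
 * to Shift-DR, which clears TX_READY and pops the word; with consume == 0 the detour
 * through Exit1-DR/Pause-DR/Exit2-DR only peeks at the value, so it can still be read
 * by a later consuming access.
 */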
425 int xscale_read_tx(target_t *target, int consume)
426 {
427 armv4_5_common_t *armv4_5 = target->arch_info;
428 xscale_common_t *xscale = armv4_5->arch_info;
429 enum tap_state path[3];
430 enum tap_state noconsume_path[6];
431
432 int retval;
433 struct timeval timeout, now;
434
435 scan_field_t fields[3];
436 u8 field0_in = 0x0;
437 u8 field0_check_value = 0x2;
438 u8 field0_check_mask = 0x6;
439 u8 field2_check_value = 0x0;
440 u8 field2_check_mask = 0x1;
441
442 jtag_add_end_state(TAP_RTI);
443
444 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
445
446 path[0] = TAP_SDS;
447 path[1] = TAP_CD;
448 path[2] = TAP_SD;
449
450 noconsume_path[0] = TAP_SDS;
451 noconsume_path[1] = TAP_CD;
452 noconsume_path[2] = TAP_E1D;
453 noconsume_path[3] = TAP_PD;
454 noconsume_path[4] = TAP_E2D;
455 noconsume_path[5] = TAP_SD;
456
457 fields[0].device = xscale->jtag_info.chain_pos;
458 fields[0].num_bits = 3;
459 fields[0].out_value = NULL;
460 fields[0].out_mask = NULL;
461 fields[0].in_value = &field0_in;
462 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
463
464 fields[1].device = xscale->jtag_info.chain_pos;
465 fields[1].num_bits = 32;
466 fields[1].out_value = NULL;
467 fields[1].out_mask = NULL;
468 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
469 fields[1].in_handler = NULL;
470 fields[1].in_handler_priv = NULL;
471 fields[1].in_check_value = NULL;
472 fields[1].in_check_mask = NULL;
473
474
475
476 fields[2].device = xscale->jtag_info.chain_pos;
477 fields[2].num_bits = 1;
478 fields[2].out_value = NULL;
479 fields[2].out_mask = NULL;
480 fields[2].in_value = NULL;
481 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
482
483 gettimeofday(&timeout, NULL);
484 timeval_add_time(&timeout, 1, 0);
485
486 for (;;)
487 {
488 int i;
489 for (i=0; i<100; i++)
490 {
491 /* if we want to consume the register content (i.e. clear TX_READY),
492 * we have to go straight from Capture-DR to Shift-DR
493 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
494 */
495 if (consume)
496 jtag_add_pathmove(3, path);
497 else
498 {
499 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
500 }
501
502 jtag_add_dr_scan(3, fields, TAP_RTI);
503
504 if ((retval = jtag_execute_queue()) != ERROR_OK)
505 {
506 LOG_ERROR("JTAG error while reading TX");
507 return ERROR_TARGET_TIMEOUT;
508 }
509
510 gettimeofday(&now, NULL);
511 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
512 {
513 LOG_ERROR("time out reading TX register");
514 return ERROR_TARGET_TIMEOUT;
515 }
516 if (!((!(field0_in & 1)) && consume))
517 {
518 goto done;
519 }
520 }
521 LOG_DEBUG("waiting 10ms");
522 usleep(10*1000); /* avoid flooding the logs */
523 }
524 done:
525
526 if (!(field0_in & 1))
527 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
528
529 return ERROR_OK;
530 }
531
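/* Push the word staged in the cached RX register down to the debug handler via the
 * DBGRX data register: poll until the previous word has been picked up (bit 0 of the
 * returned status clears), then rescan with the valid flag (the trailing 1-bit field)
 * set to hand the new word over.
 */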
532 int xscale_write_rx(target_t *target)
533 {
534 armv4_5_common_t *armv4_5 = target->arch_info;
535 xscale_common_t *xscale = armv4_5->arch_info;
536
537 int retval;
538 struct timeval timeout, now;
539
540 scan_field_t fields[3];
541 u8 field0_out = 0x0;
542 u8 field0_in = 0x0;
543 u8 field0_check_value = 0x2;
544 u8 field0_check_mask = 0x6;
545 u8 field2 = 0x0;
546 u8 field2_check_value = 0x0;
547 u8 field2_check_mask = 0x1;
548
549 jtag_add_end_state(TAP_RTI);
550
551 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
552
553 fields[0].device = xscale->jtag_info.chain_pos;
554 fields[0].num_bits = 3;
555 fields[0].out_value = &field0_out;
556 fields[0].out_mask = NULL;
557 fields[0].in_value = &field0_in;
558 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
559
560 fields[1].device = xscale->jtag_info.chain_pos;
561 fields[1].num_bits = 32;
562 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
563 fields[1].out_mask = NULL;
564 fields[1].in_value = NULL;
565 fields[1].in_handler = NULL;
566 fields[1].in_handler_priv = NULL;
567 fields[1].in_check_value = NULL;
568 fields[1].in_check_mask = NULL;
569
570
571
572 fields[2].device = xscale->jtag_info.chain_pos;
573 fields[2].num_bits = 1;
574 fields[2].out_value = &field2;
575 fields[2].out_mask = NULL;
576 fields[2].in_value = NULL;
577 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
578
579 gettimeofday(&timeout, NULL);
580 timeval_add_time(&timeout, 1, 0);
581
582 /* poll until rx_read is low */
583 LOG_DEBUG("polling RX");
584 for (;;)
585 {
586 int i;
587 for (i=0; i<10; i++)
588 {
589 jtag_add_dr_scan(3, fields, TAP_RTI);
590
591 if ((retval = jtag_execute_queue()) != ERROR_OK)
592 {
593 LOG_ERROR("JTAG error while writing RX");
594 return retval;
595 }
596
597 gettimeofday(&now, NULL);
598 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
599 {
600 LOG_ERROR("time out writing RX register");
601 return ERROR_TARGET_TIMEOUT;
602 }
603 if (!(field0_in & 1))
604 goto done;
605 }
606 LOG_DEBUG("waiting 10ms");
607 usleep(10*1000); /* wait 10ms to avoid flooding the logs */
608 }
609 done:
610
611 /* set rx_valid */
612 field2 = 0x1;
613 jtag_add_dr_scan(3, fields, TAP_RTI);
614
615 if ((retval = jtag_execute_queue()) != ERROR_OK)
616 {
617 LOG_ERROR("JTAG error while writing RX");
618 return retval;
619 }
620
621 return ERROR_OK;
622 }
623
624 /* send count elements of size bytes each to the debug handler */
625 int xscale_send(target_t *target, u8 *buffer, int count, int size)
626 {
627 armv4_5_common_t *armv4_5 = target->arch_info;
628 xscale_common_t *xscale = armv4_5->arch_info;
629
630 int retval;
631
632 int done_count = 0;
633 u8 output[4] = {0, 0, 0, 0};
634
635 scan_field_t fields[3];
636 u8 field0_out = 0x0;
637 u8 field0_check_value = 0x2;
638 u8 field0_check_mask = 0x6;
639 u8 field2 = 0x1;
640 u8 field2_check_value = 0x0;
641 u8 field2_check_mask = 0x1;
642
643 jtag_add_end_state(TAP_RTI);
644
645 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
646
647 fields[0].device = xscale->jtag_info.chain_pos;
648 fields[0].num_bits = 3;
649 fields[0].out_value = &field0_out;
650 fields[0].out_mask = NULL;
651 fields[0].in_handler = NULL;
652 fields[0].in_value = NULL;
653 if (!xscale->fast_memory_access)
654 {
655 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
656 }
657
658 fields[1].device = xscale->jtag_info.chain_pos;
659 fields[1].num_bits = 32;
660 fields[1].out_value = output;
661 fields[1].out_mask = NULL;
662 fields[1].in_value = NULL;
663 fields[1].in_handler = NULL;
664 fields[1].in_handler_priv = NULL;
665 fields[1].in_check_value = NULL;
666 fields[1].in_check_mask = NULL;
667
668
669
670 fields[2].device = xscale->jtag_info.chain_pos;
671 fields[2].num_bits = 1;
672 fields[2].out_value = &field2;
673 fields[2].out_mask = NULL;
674 fields[2].in_value = NULL;
675 fields[2].in_handler = NULL;
676 if (!xscale->fast_memory_access)
677 {
678 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
679 }
680
681 if (size==4)
682 {
683 int endianness = target->endianness;
684 while (done_count++ < count)
685 {
686 if (endianness == TARGET_LITTLE_ENDIAN)
687 {
688 output[0]=buffer[0];
689 output[1]=buffer[1];
690 output[2]=buffer[2];
691 output[3]=buffer[3];
692 } else
693 {
694 output[0]=buffer[3];
695 output[1]=buffer[2];
696 output[2]=buffer[1];
697 output[3]=buffer[0];
698 }
699 jtag_add_dr_scan(3, fields, TAP_RTI);
700 buffer += size;
701 }
702
703 } else
704 {
705 while (done_count++ < count)
706 {
707 /* extract sized element from target-endian buffer, and put it
708 * into little-endian output buffer
709 */
710 switch (size)
711 {
712 case 2:
713 buf_set_u32(output, 0, 32, target_buffer_get_u16(target, buffer));
714 break;
715 case 1:
716 output[0] = *buffer;
717 break;
718 default:
719 LOG_ERROR("BUG: size neither 4, 2 nor 1");
720 exit(-1);
721 }
722
723 jtag_add_dr_scan(3, fields, TAP_RTI);
724 buffer += size;
725 }
726
727 }
728
729 if ((retval = jtag_execute_queue()) != ERROR_OK)
730 {
731 LOG_ERROR("JTAG error while sending data to debug handler");
732 return retval;
733 }
734
735 return ERROR_OK;
736 }
737
738 int xscale_send_u32(target_t *target, u32 value)
739 {
740 armv4_5_common_t *armv4_5 = target->arch_info;
741 xscale_common_t *xscale = armv4_5->arch_info;
742
743 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
744 return xscale_write_rx(target);
745 }
746
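/* Write the cached DCSR value to the target.  hold_rst and ext_dbg_brk update the
 * corresponding sticky flags; passing -1 leaves a flag unchanged.
 */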
747 int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
748 {
749 armv4_5_common_t *armv4_5 = target->arch_info;
750 xscale_common_t *xscale = armv4_5->arch_info;
751
752 int retval;
753
754 scan_field_t fields[3];
755 u8 field0 = 0x0;
756 u8 field0_check_value = 0x2;
757 u8 field0_check_mask = 0x7;
758 u8 field2 = 0x0;
759 u8 field2_check_value = 0x0;
760 u8 field2_check_mask = 0x1;
761
762 if (hold_rst != -1)
763 xscale->hold_rst = hold_rst;
764
765 if (ext_dbg_brk != -1)
766 xscale->external_debug_break = ext_dbg_brk;
767
768 jtag_add_end_state(TAP_RTI);
769 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
770
771 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
772 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
773
774 fields[0].device = xscale->jtag_info.chain_pos;
775 fields[0].num_bits = 3;
776 fields[0].out_value = &field0;
777 fields[0].out_mask = NULL;
778 fields[0].in_value = NULL;
779 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
780
781 fields[1].device = xscale->jtag_info.chain_pos;
782 fields[1].num_bits = 32;
783 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
784 fields[1].out_mask = NULL;
785 fields[1].in_value = NULL;
786 fields[1].in_handler = NULL;
787 fields[1].in_handler_priv = NULL;
788 fields[1].in_check_value = NULL;
789 fields[1].in_check_mask = NULL;
790
791
792
793 fields[2].device = xscale->jtag_info.chain_pos;
794 fields[2].num_bits = 1;
795 fields[2].out_value = &field2;
796 fields[2].out_mask = NULL;
797 fields[2].in_value = NULL;
798 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
799
800 jtag_add_dr_scan(3, fields, -1);
801
802 if ((retval = jtag_execute_queue()) != ERROR_OK)
803 {
804 LOG_ERROR("JTAG error while writing DCSR");
805 return retval;
806 }
807
808 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
809 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
810
811 return ERROR_OK;
812 }
813
814 /* even parity of a 32-bit word: 0 if the number of set bits is even, 1 if odd (0x6996 acts as a 16-entry parity lookup table) */
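/* e.g. parity(0x3) == 0 (two bits set), parity(0x7) == 1 (three bits set) */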
815 unsigned int parity (unsigned int v)
816 {
817 unsigned int ov = v;
818 v ^= v >> 16;
819 v ^= v >> 8;
820 v ^= v >> 4;
821 v &= 0xf;
822 LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
823 return (0x6996 >> v) & 1;
824 }
825
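/* Load a line of eight words into the (mini) instruction cache through the LDIC JTAG
 * function: first a 6-bit command (b011 = mini IC, b010 = main IC) together with the
 * 27-bit virtual address of the cache line is scanned in, then each of the eight data
 * words follows, tagged with its parity bit.
 */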
826 int xscale_load_ic(target_t *target, int mini, u32 va, u32 buffer[8])
827 {
828 armv4_5_common_t *armv4_5 = target->arch_info;
829 xscale_common_t *xscale = armv4_5->arch_info;
830 u8 packet[4];
831 u8 cmd;
832 int word;
833
834 scan_field_t fields[2];
835
836 LOG_DEBUG("loading miniIC at 0x%8.8x", va);
837
838 jtag_add_end_state(TAP_RTI);
839 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
840
841 /* CMD is b010 for Main IC and b011 for Mini IC */
842 if (mini)
843 buf_set_u32(&cmd, 0, 3, 0x3);
844 else
845 buf_set_u32(&cmd, 0, 3, 0x2);
846
847 buf_set_u32(&cmd, 3, 3, 0x0);
848
849 /* virtual address of desired cache line */
850 buf_set_u32(packet, 0, 27, va >> 5);
851
852 fields[0].device = xscale->jtag_info.chain_pos;
853 fields[0].num_bits = 6;
854 fields[0].out_value = &cmd;
855 fields[0].out_mask = NULL;
856 fields[0].in_value = NULL;
857 fields[0].in_check_value = NULL;
858 fields[0].in_check_mask = NULL;
859 fields[0].in_handler = NULL;
860 fields[0].in_handler_priv = NULL;
861
862 fields[1].device = xscale->jtag_info.chain_pos;
863 fields[1].num_bits = 27;
864 fields[1].out_value = packet;
865 fields[1].out_mask = NULL;
866 fields[1].in_value = NULL;
867 fields[1].in_check_value = NULL;
868 fields[1].in_check_mask = NULL;
869 fields[1].in_handler = NULL;
870 fields[1].in_handler_priv = NULL;
871
872 jtag_add_dr_scan(2, fields, -1);
873
874 fields[0].num_bits = 32;
875 fields[0].out_value = packet;
876
877 fields[1].num_bits = 1;
878 fields[1].out_value = &cmd;
879
880 for (word = 0; word < 8; word++)
881 {
882 buf_set_u32(packet, 0, 32, buffer[word]);
883 cmd = parity(*((u32*)packet));
884 jtag_add_dr_scan(2, fields, -1);
885 }
886
887 jtag_execute_queue();
888
889 return ERROR_OK;
890 }
891
892 int xscale_invalidate_ic_line(target_t *target, u32 va)
893 {
894 armv4_5_common_t *armv4_5 = target->arch_info;
895 xscale_common_t *xscale = armv4_5->arch_info;
896 u8 packet[4];
897 u8 cmd;
898
899 scan_field_t fields[2];
900
901 jtag_add_end_state(TAP_RTI);
902 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
903
904 /* CMD for invalidate IC line b000, bits [6:4] b000 */
905 buf_set_u32(&cmd, 0, 6, 0x0);
906
907 /* virtual address of desired cache line */
908 buf_set_u32(packet, 0, 27, va >> 5);
909
910 fields[0].device = xscale->jtag_info.chain_pos;
911 fields[0].num_bits = 6;
912 fields[0].out_value = &cmd;
913 fields[0].out_mask = NULL;
914 fields[0].in_value = NULL;
915 fields[0].in_check_value = NULL;
916 fields[0].in_check_mask = NULL;
917 fields[0].in_handler = NULL;
918 fields[0].in_handler_priv = NULL;
919
920 fields[1].device = xscale->jtag_info.chain_pos;
921 fields[1].num_bits = 27;
922 fields[1].out_value = packet;
923 fields[1].out_mask = NULL;
924 fields[1].in_value = NULL;
925 fields[1].in_check_value = NULL;
926 fields[1].in_check_mask = NULL;
927 fields[1].in_handler = NULL;
928 fields[1].in_handler_priv = NULL;
929
930 jtag_add_dr_scan(2, fields, -1);
931
932 return ERROR_OK;
933 }
934
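/* Rebuild the exception vector tables held in the mini instruction cache.  Vectors 1-7
 * are taken from the configured static vectors or read back from target memory; the two
 * reset vectors are replaced by relative branches to the debug handler (handler_address
 * + 0x20), so that execution after a trapped reset lands in the handler.
 */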
935 int xscale_update_vectors(target_t *target)
936 {
937 armv4_5_common_t *armv4_5 = target->arch_info;
938 xscale_common_t *xscale = armv4_5->arch_info;
939 int i;
940 int retval;
941
942 u32 low_reset_branch, high_reset_branch;
943
944 for (i = 1; i < 8; i++)
945 {
946 /* if there's a static vector specified for this exception, override */
947 if (xscale->static_high_vectors_set & (1 << i))
948 {
949 xscale->high_vectors[i] = xscale->static_high_vectors[i];
950 }
951 else
952 {
953 retval=target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
954 if (retval == ERROR_TARGET_TIMEOUT)
955 return retval;
956 if (retval!=ERROR_OK)
957 {
958 /* Some of these reads will fail as part of normal execution */
959 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
960 }
961 }
962 }
963
964 for (i = 1; i < 8; i++)
965 {
966 if (xscale->static_low_vectors_set & (1 << i))
967 {
968 xscale->low_vectors[i] = xscale->static_low_vectors[i];
969 }
970 else
971 {
972 retval=target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
973 if (retval == ERROR_TARGET_TIMEOUT)
974 return retval;
975 if (retval!=ERROR_OK)
976 {
977 /* Some of these reads will fail as part of normal execution */
978 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
979 }
980 }
981 }
982
983 /* calculate branches to debug handler */
984 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
985 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
986
987 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
988 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
989
990 /* invalidate and load exception vectors in mini i-cache */
991 xscale_invalidate_ic_line(target, 0x0);
992 xscale_invalidate_ic_line(target, 0xffff0000);
993
994 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
995 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
996
997 return ERROR_OK;
998 }
999
1000 int xscale_arch_state(struct target_s *target)
1001 {
1002 armv4_5_common_t *armv4_5 = target->arch_info;
1003 xscale_common_t *xscale = armv4_5->arch_info;
1004
1005 char *state[] =
1006 {
1007 "disabled", "enabled"
1008 };
1009
1010 char *arch_dbg_reason[] =
1011 {
1012 "", "\n(processor reset)", "\n(trace buffer full)"
1013 };
1014
1015 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
1016 {
1017 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
1018 exit(-1);
1019 }
1020
1021 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
1022 "cpsr: 0x%8.8x pc: 0x%8.8x\n"
1023 "MMU: %s, D-Cache: %s, I-Cache: %s"
1024 "%s",
1025 armv4_5_state_strings[armv4_5->core_state],
1026 target_debug_reason_strings[target->debug_reason],
1027 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
1028 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
1029 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
1030 state[xscale->armv4_5_mmu.mmu_enabled],
1031 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
1032 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
1033 arch_dbg_reason[xscale->arch_debug_reason]);
1034
1035 return ERROR_OK;
1036 }
1037
1038 int xscale_poll(target_t *target)
1039 {
1040 int retval=ERROR_OK;
1041 armv4_5_common_t *armv4_5 = target->arch_info;
1042 xscale_common_t *xscale = armv4_5->arch_info;
1043
1044 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
1045 {
1046 enum target_state previous_state = target->state;
1047 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
1048 {
1049
1050 /* there's data to read from the tx register, we entered debug state */
1051 xscale->handler_running = 1;
1052
1053 target->state = TARGET_HALTED;
1054
1055 /* process debug entry, fetching current mode regs */
1056 retval = xscale_debug_entry(target);
1057 }
1058 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1059 {
1060 LOG_USER("error while polling TX register, reset CPU");
1061 			/* here we "lie" so GDB won't get stuck and a reset can be performed */
1062 target->state = TARGET_HALTED;
1063 }
1064
1065 /* debug_entry could have overwritten target state (i.e. immediate resume)
1066 * don't signal event handlers in that case
1067 */
1068 if (target->state != TARGET_HALTED)
1069 return ERROR_OK;
1070
1071 /* if target was running, signal that we halted
1072 * otherwise we reentered from debug execution */
1073 if (previous_state == TARGET_RUNNING)
1074 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1075 else
1076 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
1077 }
1078
1079 return retval;
1080 }
1081
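/* Process entry into debug state: the debug handler first sends r0, pc, r1-r7 and the
 * cpsr, then the banked registers of the interrupted mode (plus the spsr unless in
 * USR/SYS).  The DCSR method-of-entry field is then inspected to work out why the core
 * halted and which pc fixup to apply.
 */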
1082 int xscale_debug_entry(target_t *target)
1083 {
1084 armv4_5_common_t *armv4_5 = target->arch_info;
1085 xscale_common_t *xscale = armv4_5->arch_info;
1086 u32 pc;
1087 u32 buffer[10];
1088 int i;
1089 int retval;
1090
1091 u32 moe;
1092
1093 /* clear external dbg break (will be written on next DCSR read) */
1094 xscale->external_debug_break = 0;
1095 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
1096 return retval;
1097
1098 /* get r0, pc, r1 to r7 and cpsr */
1099 if ((retval=xscale_receive(target, buffer, 10))!=ERROR_OK)
1100 return retval;
1101
1102 /* move r0 from buffer to register cache */
1103 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
1104 	armv4_5->core_cache->reg_list[0].dirty = 1;
1105 	armv4_5->core_cache->reg_list[0].valid = 1;
1106 LOG_DEBUG("r0: 0x%8.8x", buffer[0]);
1107
1108 /* move pc from buffer to register cache */
1109 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
1110 armv4_5->core_cache->reg_list[15].dirty = 1;
1111 armv4_5->core_cache->reg_list[15].valid = 1;
1112 LOG_DEBUG("pc: 0x%8.8x", buffer[1]);
1113
1114 /* move data from buffer to register cache */
1115 for (i = 1; i <= 7; i++)
1116 {
1117 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
1118 armv4_5->core_cache->reg_list[i].dirty = 1;
1119 armv4_5->core_cache->reg_list[i].valid = 1;
1120 LOG_DEBUG("r%i: 0x%8.8x", i, buffer[i + 1]);
1121 }
1122
1123 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
1124 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
1125 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
1126 LOG_DEBUG("cpsr: 0x%8.8x", buffer[9]);
1127
1128 armv4_5->core_mode = buffer[9] & 0x1f;
1129 if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
1130 {
1131 target->state = TARGET_UNKNOWN;
1132 LOG_ERROR("cpsr contains invalid mode value - communication failure");
1133 return ERROR_TARGET_FAILURE;
1134 }
1135 LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
1136
1137 if (buffer[9] & 0x20)
1138 armv4_5->core_state = ARMV4_5_STATE_THUMB;
1139 else
1140 armv4_5->core_state = ARMV4_5_STATE_ARM;
1141
1142 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1143 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
1144 {
1145 xscale_receive(target, buffer, 8);
1146 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1147 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
1148 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
1149 }
1150 else
1151 {
1152 /* r8 to r14, but no spsr */
1153 xscale_receive(target, buffer, 7);
1154 }
1155
1156 /* move data from buffer to register cache */
1157 for (i = 8; i <= 14; i++)
1158 {
1159 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
1160 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
1161 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
1162 }
1163
1164 /* examine debug reason */
1165 xscale_read_dcsr(target);
1166 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
1167
1168 /* stored PC (for calculating fixup) */
1169 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1170
1171 switch (moe)
1172 {
1173 case 0x0: /* Processor reset */
1174 target->debug_reason = DBG_REASON_DBGRQ;
1175 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1176 pc -= 4;
1177 break;
1178 case 0x1: /* Instruction breakpoint hit */
1179 target->debug_reason = DBG_REASON_BREAKPOINT;
1180 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1181 pc -= 4;
1182 break;
1183 case 0x2: /* Data breakpoint hit */
1184 target->debug_reason = DBG_REASON_WATCHPOINT;
1185 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1186 pc -= 4;
1187 break;
1188 case 0x3: /* BKPT instruction executed */
1189 target->debug_reason = DBG_REASON_BREAKPOINT;
1190 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1191 pc -= 4;
1192 break;
1193 case 0x4: /* Ext. debug event */
1194 target->debug_reason = DBG_REASON_DBGRQ;
1195 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1196 pc -= 4;
1197 break;
1198 		case 0x5: /* Vector trap occurred */
1199 target->debug_reason = DBG_REASON_BREAKPOINT;
1200 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1201 pc -= 4;
1202 break;
1203 case 0x6: /* Trace buffer full break */
1204 target->debug_reason = DBG_REASON_DBGRQ;
1205 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1206 pc -= 4;
1207 break;
1208 case 0x7: /* Reserved */
1209 default:
1210 LOG_ERROR("Method of Entry is 'Reserved'");
1211 exit(-1);
1212 break;
1213 }
1214
1215 /* apply PC fixup */
1216 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1217
1218 /* on the first debug entry, identify cache type */
1219 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1220 {
1221 u32 cache_type_reg;
1222
1223 /* read cp15 cache type register */
1224 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1225 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1226
1227 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1228 }
1229
1230 /* examine MMU and Cache settings */
1231 /* read cp15 control register */
1232 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1233 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1234 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1235 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1236 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1237
1238 /* tracing enabled, read collected trace data */
1239 if (xscale->trace.buffer_enabled)
1240 {
1241 xscale_read_trace(target);
1242 xscale->trace.buffer_fill--;
1243
1244 /* resume if we're still collecting trace data */
1245 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1246 && (xscale->trace.buffer_fill > 0))
1247 {
1248 xscale_resume(target, 1, 0x0, 1, 0);
1249 }
1250 else
1251 {
1252 xscale->trace.buffer_enabled = 0;
1253 }
1254 }
1255
1256 return ERROR_OK;
1257 }
1258
1259 int xscale_halt(target_t *target)
1260 {
1261 armv4_5_common_t *armv4_5 = target->arch_info;
1262 xscale_common_t *xscale = armv4_5->arch_info;
1263
1264 LOG_DEBUG("target->state: %s", target_state_strings[target->state]);
1265
1266 if (target->state == TARGET_HALTED)
1267 {
1268 LOG_WARNING("target was already halted");
1269 return ERROR_OK;
1270 }
1271 else if (target->state == TARGET_UNKNOWN)
1272 {
1273 		/* this must not happen for an XScale target */
1274 LOG_ERROR("target was in unknown state when halt was requested");
1275 return ERROR_TARGET_INVALID;
1276 }
1277 else if (target->state == TARGET_RESET)
1278 {
1279 LOG_DEBUG("target->state == TARGET_RESET");
1280 }
1281 else
1282 {
1283 /* assert external dbg break */
1284 xscale->external_debug_break = 1;
1285 xscale_read_dcsr(target);
1286
1287 target->debug_reason = DBG_REASON_DBGRQ;
1288 }
1289
1290 return ERROR_OK;
1291 }
1292
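/* Single-stepping is emulated here: IBCR0 is programmed as a one-shot instruction
 * breakpoint on the address of the next instruction (computed by the ARM instruction
 * simulator in the callers), and cleared again once the step has completed.
 */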
1293 int xscale_enable_single_step(struct target_s *target, u32 next_pc)
1294 {
1295 armv4_5_common_t *armv4_5 = target->arch_info;
1296 xscale_common_t *xscale= armv4_5->arch_info;
1297 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1298
1299 if (xscale->ibcr0_used)
1300 {
1301 breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1302
1303 if (ibcr0_bp)
1304 {
1305 xscale_unset_breakpoint(target, ibcr0_bp);
1306 }
1307 else
1308 {
1309 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1310 exit(-1);
1311 }
1312 }
1313
1314 xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1315
1316 return ERROR_OK;
1317 }
1318
1319 int xscale_disable_single_step(struct target_s *target)
1320 {
1321 armv4_5_common_t *armv4_5 = target->arch_info;
1322 xscale_common_t *xscale= armv4_5->arch_info;
1323 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1324
1325 xscale_set_reg_u32(ibcr0, 0x0);
1326
1327 return ERROR_OK;
1328 }
1329
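/* Resume execution.  If a breakpoint sits on the resume address it is temporarily
 * removed and stepped over first.  The context is then handed back to the debug
 * handler: a resume command (0x30, or 0x31 preceded by 0x62 when tracing is enabled),
 * followed by the cpsr, r7 down to r0 and finally the pc.
 */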
1330 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution)
1331 {
1332 armv4_5_common_t *armv4_5 = target->arch_info;
1333 xscale_common_t *xscale= armv4_5->arch_info;
1334 breakpoint_t *breakpoint = target->breakpoints;
1335
1336 u32 current_pc;
1337
1338 int retval;
1339 int i;
1340
1341 LOG_DEBUG("-");
1342
1343 if (target->state != TARGET_HALTED)
1344 {
1345 LOG_WARNING("target not halted");
1346 return ERROR_TARGET_NOT_HALTED;
1347 }
1348
1349 if (!debug_execution)
1350 {
1351 target_free_all_working_areas(target);
1352 }
1353
1354 /* update vector tables */
1355 if ((retval=xscale_update_vectors(target))!=ERROR_OK)
1356 return retval;
1357
1358 /* current = 1: continue on current pc, otherwise continue at <address> */
1359 if (!current)
1360 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1361
1362 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1363
1364 /* if we're at the reset vector, we have to simulate the branch */
1365 if (current_pc == 0x0)
1366 {
1367 arm_simulate_step(target, NULL);
1368 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1369 }
1370
1371 /* the front-end may request us not to handle breakpoints */
1372 if (handle_breakpoints)
1373 {
1374 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1375 {
1376 u32 next_pc;
1377
1378 /* there's a breakpoint at the current PC, we have to step over it */
1379 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1380 xscale_unset_breakpoint(target, breakpoint);
1381
1382 /* calculate PC of next instruction */
1383 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1384 {
1385 u32 current_opcode;
1386 target_read_u32(target, current_pc, &current_opcode);
1387 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1388 }
1389
1390 LOG_DEBUG("enable single-step");
1391 xscale_enable_single_step(target, next_pc);
1392
1393 /* restore banked registers */
1394 xscale_restore_context(target);
1395
1396 /* send resume request (command 0x30 or 0x31)
1397 * clean the trace buffer if it is to be enabled (0x62) */
1398 if (xscale->trace.buffer_enabled)
1399 {
1400 xscale_send_u32(target, 0x62);
1401 xscale_send_u32(target, 0x31);
1402 }
1403 else
1404 xscale_send_u32(target, 0x30);
1405
1406 /* send CPSR */
1407 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1408 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1409
1410 for (i = 7; i >= 0; i--)
1411 {
1412 /* send register */
1413 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1414 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1415 }
1416
1417 /* send PC */
1418 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1419 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1420
1421 /* wait for and process debug entry */
1422 xscale_debug_entry(target);
1423
1424 LOG_DEBUG("disable single-step");
1425 xscale_disable_single_step(target);
1426
1427 LOG_DEBUG("set breakpoint at 0x%8.8x", breakpoint->address);
1428 xscale_set_breakpoint(target, breakpoint);
1429 }
1430 }
1431
1432 /* enable any pending breakpoints and watchpoints */
1433 xscale_enable_breakpoints(target);
1434 xscale_enable_watchpoints(target);
1435
1436 /* restore banked registers */
1437 xscale_restore_context(target);
1438
1439 /* send resume request (command 0x30 or 0x31)
1440 * clean the trace buffer if it is to be enabled (0x62) */
1441 if (xscale->trace.buffer_enabled)
1442 {
1443 xscale_send_u32(target, 0x62);
1444 xscale_send_u32(target, 0x31);
1445 }
1446 else
1447 xscale_send_u32(target, 0x30);
1448
1449 /* send CPSR */
1450 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1451 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1452
1453 for (i = 7; i >= 0; i--)
1454 {
1455 /* send register */
1456 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1457 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1458 }
1459
1460 /* send PC */
1461 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1462 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1463
1464 target->debug_reason = DBG_REASON_NOTHALTED;
1465
1466 if (!debug_execution)
1467 {
1468 /* registers are now invalid */
1469 armv4_5_invalidate_core_regs(target);
1470 target->state = TARGET_RUNNING;
1471 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1472 }
1473 else
1474 {
1475 target->state = TARGET_DEBUG_RUNNING;
1476 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1477 }
1478
1479 LOG_DEBUG("target resumed");
1480
1481 xscale->handler_running = 1;
1482
1483 return ERROR_OK;
1484 }
1485
1486 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints)
1487 {
1488 armv4_5_common_t *armv4_5 = target->arch_info;
1489 xscale_common_t *xscale = armv4_5->arch_info;
1490 breakpoint_t *breakpoint = target->breakpoints;
1491
1492 u32 current_pc, next_pc;
1493 int i;
1494 int retval;
1495
1496 if (target->state != TARGET_HALTED)
1497 {
1498 LOG_WARNING("target not halted");
1499 return ERROR_TARGET_NOT_HALTED;
1500 }
1501
1502 /* current = 1: continue on current pc, otherwise continue at <address> */
1503 if (!current)
1504 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1505
1506 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1507
1508 /* if we're at the reset vector, we have to simulate the step */
1509 if (current_pc == 0x0)
1510 {
1511 arm_simulate_step(target, NULL);
1512 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1513
1514 target->debug_reason = DBG_REASON_SINGLESTEP;
1515 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1516
1517 return ERROR_OK;
1518 }
1519
1520 /* the front-end may request us not to handle breakpoints */
1521 if (handle_breakpoints)
1522 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1523 {
1524 xscale_unset_breakpoint(target, breakpoint);
1525 }
1526
1527 target->debug_reason = DBG_REASON_SINGLESTEP;
1528
1529 /* calculate PC of next instruction */
1530 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1531 {
1532 u32 current_opcode;
1533 target_read_u32(target, current_pc, &current_opcode);
1534 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1535 }
1536
1537 LOG_DEBUG("enable single-step");
1538 xscale_enable_single_step(target, next_pc);
1539
1540 /* restore banked registers */
1541 xscale_restore_context(target);
1542
1543 /* send resume request (command 0x30 or 0x31)
1544 * clean the trace buffer if it is to be enabled (0x62) */
1545 if (xscale->trace.buffer_enabled)
1546 {
1547 xscale_send_u32(target, 0x62);
1548 xscale_send_u32(target, 0x31);
1549 }
1550 else
1551 xscale_send_u32(target, 0x30);
1552
1553 /* send CPSR */
1554 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1555 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1556
1557 for (i = 7; i >= 0; i--)
1558 {
1559 /* send register */
1560 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1561 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1562 }
1563
1564 /* send PC */
1565 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1566 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1567
1568 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1569
1570 /* registers are now invalid */
1571 armv4_5_invalidate_core_regs(target);
1572
1573 /* wait for and process debug entry */
1574 xscale_debug_entry(target);
1575
1576 LOG_DEBUG("disable single-step");
1577 xscale_disable_single_step(target);
1578
1579 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1580
1581 if (breakpoint)
1582 {
1583 xscale_set_breakpoint(target, breakpoint);
1584 }
1585
1586 LOG_DEBUG("target stepped");
1587
1588 return ERROR_OK;
1589
1590 }
1591
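/* Assert reset: pulse TRST, program the DCSR for "hold reset", halt mode and vector
 * trap on reset, switch the TAP to BYPASS (keeping DCSR selected across reset caused
 * problems on the PXA27x), and only then assert SRST.
 */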
1592 int xscale_assert_reset(target_t *target)
1593 {
1594 armv4_5_common_t *armv4_5 = target->arch_info;
1595 xscale_common_t *xscale = armv4_5->arch_info;
1596
1597 LOG_DEBUG("target->state: %s", target_state_strings[target->state]);
1598
1599 /* TRST every time. We want to be able to support daemon_startup attach */
1600 jtag_add_reset(1, 0);
1601 jtag_add_sleep(5000);
1602 jtag_add_reset(0, 0);
1603 jtag_add_sleep(5000);
1604 jtag_execute_queue();
1605
1606
1607
1608 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1609 * end up in T-L-R, which would reset JTAG
1610 */
1611 jtag_add_end_state(TAP_RTI);
1612 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
1613
1614 /* set Hold reset, Halt mode and Trap Reset */
1615 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1616 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1617 xscale_write_dcsr(target, 1, 0);
1618
1619 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1620 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, 0x7f);
1621 jtag_execute_queue();
1622
1623 /* assert reset */
1624 jtag_add_reset(0, 1);
1625
1626 /* sleep 1ms, to be sure we fulfill any requirements */
1627 jtag_add_sleep(1000);
1628 jtag_execute_queue();
1629
1630 target->state = TARGET_RESET;
1631
1632 return ERROR_OK;
1633 }
1634
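/* Deassert reset.  Unless the debug handler is already installed, the handler image
 * (xscale/debug_handler.bin) is downloaded into the mini instruction cache together
 * with the exception vectors, and the core is then released from "hold reset" so that
 * it traps straight into the handler.
 */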
1635 int xscale_deassert_reset(target_t *target)
1636 {
1637 armv4_5_common_t *armv4_5 = target->arch_info;
1638 xscale_common_t *xscale = armv4_5->arch_info;
1639
1640 fileio_t debug_handler;
1641 u32 address;
1642 u32 binary_size;
1643
1644 u32 buf_cnt;
1645 int i;
1646 int retval;
1647
1648 breakpoint_t *breakpoint = target->breakpoints;
1649
1650 LOG_DEBUG("-");
1651
1652 xscale->ibcr_available = 2;
1653 xscale->ibcr0_used = 0;
1654 xscale->ibcr1_used = 0;
1655
1656 xscale->dbr_available = 2;
1657 xscale->dbr0_used = 0;
1658 xscale->dbr1_used = 0;
1659
1660 /* mark all hardware breakpoints as unset */
1661 while (breakpoint)
1662 {
1663 if (breakpoint->type == BKPT_HARD)
1664 {
1665 breakpoint->set = 0;
1666 }
1667 breakpoint = breakpoint->next;
1668 }
1669
1670 if (!xscale->handler_installed)
1671 {
1672 /* release SRST */
1673 jtag_add_reset(0, 0);
1674
1675 /* wait 300ms; 150 and 100ms were not enough */
1676 jtag_add_sleep(300*1000);
1677
1678 jtag_add_runtest(2030, TAP_RTI);
1679 jtag_execute_queue();
1680
1681 /* set Hold reset, Halt mode and Trap Reset */
1682 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1683 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1684 xscale_write_dcsr(target, 1, 0);
1685
1686 /* Load debug handler */
1687 if (fileio_open(&debug_handler, "xscale/debug_handler.bin", FILEIO_READ, FILEIO_BINARY) != ERROR_OK)
1688 {
1689 return ERROR_OK;
1690 }
1691
1692 if ((binary_size = debug_handler.size) % 4)
1693 {
1694 LOG_ERROR("debug_handler.bin: size not a multiple of 4");
1695 exit(-1);
1696 }
1697
1698 if (binary_size > 0x800)
1699 {
1700 LOG_ERROR("debug_handler.bin: larger than 2kb");
1701 exit(-1);
1702 }
1703
1704 binary_size = CEIL(binary_size, 32) * 32;
1705
1706 address = xscale->handler_address;
1707 while (binary_size > 0)
1708 {
1709 u32 cache_line[8];
1710 u8 buffer[32];
1711
1712 if ((retval = fileio_read(&debug_handler, 32, buffer, &buf_cnt)) != ERROR_OK)
1713 {
1714 				fileio_close(&debug_handler); return retval;
1715 }
1716
1717 for (i = 0; i < buf_cnt; i += 4)
1718 {
1719 /* convert LE buffer to host-endian u32 */
1720 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1721 }
1722
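			/* pad a short final cache line with 0xe1a08008 (mov r8, r8), a harmless filler */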
1723 for (; buf_cnt < 32; buf_cnt += 4)
1724 {
1725 cache_line[buf_cnt / 4] = 0xe1a08008;
1726 }
1727
1728 /* only load addresses other than the reset vectors */
1729 if ((address % 0x400) != 0x0)
1730 {
1731 xscale_load_ic(target, 1, address, cache_line);
1732 }
1733
1734 address += buf_cnt;
1735 binary_size -= buf_cnt;
1736 		}
1737
1738 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
1739 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
1740
1741 jtag_add_runtest(30, TAP_RTI);
1742
1743 jtag_add_sleep(100000);
1744
1745 /* set Hold reset, Halt mode and Trap Reset */
1746 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1747 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1748 xscale_write_dcsr(target, 1, 0);
1749
1750 /* clear Hold reset to let the target run (should enter debug handler) */
1751 xscale_write_dcsr(target, 0, 1);
1752 target->state = TARGET_RUNNING;
1753
1754 if ((target->reset_mode != RESET_HALT) && (target->reset_mode != RESET_INIT))
1755 {
1756 jtag_add_sleep(10000);
1757
1758 /* we should have entered debug now */
1759 xscale_debug_entry(target);
1760 target->state = TARGET_HALTED;
1761
1762 /* resume the target */
1763 xscale_resume(target, 1, 0x0, 1, 0);
1764 }
1765
1766 fileio_close(&debug_handler);
1767 }
1768 else
1769 {
1770 jtag_add_reset(0, 0);
1771 }
1772
1773
1774 return ERROR_OK;
1775 }
1776
1777 int xscale_soft_reset_halt(struct target_s *target)
1778 {
1779
1780 return ERROR_OK;
1781 }
1782
1783 int xscale_prepare_reset_halt(struct target_s *target)
1784 {
1785 /* nothing to be done for reset_halt on XScale targets
1786 * we always halt after a reset to upload the debug handler
1787 */
1788 return ERROR_OK;
1789 }
1790
1791 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode)
1792 {
1793
1794 return ERROR_OK;
1795 }
1796
1797 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value)
1798 {
1799
1800 return ERROR_OK;
1801 }
1802
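/* Fetch the banked registers of every privileged mode (FIQ, IRQ, SVC, ABT, UND, SYS)
 * whose cached copies are not all valid: debug handler command 0x0 is sent, followed by
 * a cpsr selecting the mode, and r8-r14 (plus the spsr where applicable) are read back.
 */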
1803 int xscale_full_context(target_t *target)
1804 {
1805 armv4_5_common_t *armv4_5 = target->arch_info;
1806
1807 u32 *buffer;
1808
1809 int i, j;
1810
1811 LOG_DEBUG("-");
1812
1813 if (target->state != TARGET_HALTED)
1814 {
1815 LOG_WARNING("target not halted");
1816 return ERROR_TARGET_NOT_HALTED;
1817 }
1818
1819 buffer = malloc(4 * 8);
1820
1821 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1822 * we can't enter User mode on an XScale (unpredictable),
1823 * but User shares registers with SYS
1824 */
1825 for(i = 1; i < 7; i++)
1826 {
1827 int valid = 1;
1828
1829 /* check if there are invalid registers in the current mode
1830 */
1831 for (j = 0; j <= 16; j++)
1832 {
1833 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1834 valid = 0;
1835 }
1836
1837 if (!valid)
1838 {
1839 u32 tmp_cpsr;
1840
1841 /* request banked registers */
1842 xscale_send_u32(target, 0x0);
1843
1844 tmp_cpsr = 0x0;
1845 tmp_cpsr |= armv4_5_number_to_mode(i);
1846 tmp_cpsr |= 0xc0; /* I/F bits */
1847
1848 /* send CPSR for desired mode */
1849 xscale_send_u32(target, tmp_cpsr);
1850
1851 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1852 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1853 {
1854 xscale_receive(target, buffer, 8);
1855 				buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
1856 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1857 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1858 }
1859 else
1860 {
1861 xscale_receive(target, buffer, 7);
1862 }
1863
1864 /* move data from buffer to register cache */
1865 for (j = 8; j <= 14; j++)
1866 {
1867 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1868 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1869 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1870 }
1871 }
1872 }
1873
1874 free(buffer);
1875
1876 return ERROR_OK;
1877 }
1878
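/* Write dirty banked registers back to the target before resuming: for each mode with
 * dirty entries, debug handler command 0x1 is sent, followed by a cpsr selecting the
 * mode, r8-r14 and, outside USR/SYS, the spsr.
 */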
1879 int xscale_restore_context(target_t *target)
1880 {
1881 armv4_5_common_t *armv4_5 = target->arch_info;
1882
1883 int i, j;
1884
1885 LOG_DEBUG("-");
1886
1887 if (target->state != TARGET_HALTED)
1888 {
1889 LOG_WARNING("target not halted");
1890 return ERROR_TARGET_NOT_HALTED;
1891 }
1892
1893 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1894 * we can't enter User mode on an XScale (unpredictable),
1895 * but User shares registers with SYS
1896 */
1897 for(i = 1; i < 7; i++)
1898 {
1899 int dirty = 0;
1900
1901 /* check if there are invalid registers in the current mode
1902 */
1903 for (j = 8; j <= 14; j++)
1904 {
1905 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1906 dirty = 1;
1907 }
1908
1909 /* if not USR/SYS, check if the SPSR needs to be written */
1910 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1911 {
1912 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1913 dirty = 1;
1914 }
1915
1916 if (dirty)
1917 {
1918 u32 tmp_cpsr;
1919
1920 /* send banked registers */
1921 xscale_send_u32(target, 0x1);
1922
1923 tmp_cpsr = 0x0;
1924 tmp_cpsr |= armv4_5_number_to_mode(i);
1925 tmp_cpsr |= 0xc0; /* I/F bits */
1926
1927 /* send CPSR for desired mode */
1928 xscale_send_u32(target, tmp_cpsr);
1929
1930 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1931 for (j = 8; j <= 14; j++)
1932 {
1933 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
1934 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1935 }
1936
1937 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1938 {
1939 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
1940 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1941 }
1942 }
1943 }
1944
1945 return ERROR_OK;
1946 }
1947
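/* Host <-> debug handler memory transfers go through the DCSR/TX/RX JTAG data
 * registers: the host sends a command word (0x10 | size for reads,
 * 0x20 | size for writes), the base address and the word count, and data is
 * then streamed 32 bits at a time. A Sticky Abort (SA) bit in the DCSR flags
 * any data abort the handler hit while touching target memory.
 */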
1948 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1949 {
1950 armv4_5_common_t *armv4_5 = target->arch_info;
1951 xscale_common_t *xscale = armv4_5->arch_info;
1952 u32 *buf32;
1953 int i;
1954 int retval;
1955
1956 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1957
1958 if (target->state != TARGET_HALTED)
1959 {
1960 LOG_WARNING("target not halted");
1961 return ERROR_TARGET_NOT_HALTED;
1962 }
1963
1964 /* sanitize arguments */
1965 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1966 return ERROR_INVALID_ARGUMENTS;
1967
1968 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1969 return ERROR_TARGET_UNALIGNED_ACCESS;
1970
1971 /* send memory read request (command 0x1n, n: access size) */
1972 if ((retval=xscale_send_u32(target, 0x10 | size))!=ERROR_OK)
1973 return retval;
1974
1975 /* send base address for read request */
1976 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
1977 return retval;
1978
1979 /* send number of requested data words */
1980 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
1981 return retval;
1982
1983 /* receive data from target (count times 32-bit words in host endianness) */
1984 buf32 = malloc(4 * count);
1985 if ((retval=xscale_receive(target, buf32, count))!=ERROR_OK)
1986 return retval;
1987
1988 /* extract data from host-endian buffer into byte stream */
1989 for (i = 0; i < count; i++)
1990 {
1991 switch (size)
1992 {
1993 case 4:
1994 target_buffer_set_u32(target, buffer, buf32[i]);
1995 buffer += 4;
1996 break;
1997 case 2:
1998 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1999 buffer += 2;
2000 break;
2001 case 1:
2002 *buffer++ = buf32[i] & 0xff;
2003 break;
2004 default:
2005 LOG_ERROR("should never get here");
2006 exit(-1);
2007 }
2008 }
2009
2010 free(buf32);
2011
2012 /* examine DCSR, to see if Sticky Abort (SA) got set */
2013 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
2014 return retval;
2015 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2016 {
2017 /* clear SA bit */
2018 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
2019 return retval;
2020
2021 return ERROR_TARGET_DATA_ABORT;
2022 }
2023
2024 return ERROR_OK;
2025 }
2026
2027 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
2028 {
2029 armv4_5_common_t *armv4_5 = target->arch_info;
2030 xscale_common_t *xscale = armv4_5->arch_info;
2031 int retval;
2032
2033 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
2034
2035 if (target->state != TARGET_HALTED)
2036 {
2037 LOG_WARNING("target not halted");
2038 return ERROR_TARGET_NOT_HALTED;
2039 }
2040
2041 /* sanitize arguments */
2042 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
2043 return ERROR_INVALID_ARGUMENTS;
2044
2045 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2046 return ERROR_TARGET_UNALIGNED_ACCESS;
2047
2048 /* send memory write request (command 0x2n, n: access size) */
2049 if ((retval=xscale_send_u32(target, 0x20 | size))!=ERROR_OK)
2050 return retval;
2051
2052 /* send base address for write request */
2053 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
2054 return retval;
2055
2056 /* send number of data words to be written */
2057 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
2058 return retval;
2059
2060 /* word-by-word transmit loop, superseded by the bulk xscale_send() below */
2061 #if 0
2062 for (i = 0; i < count; i++)
2063 {
2064 switch (size)
2065 {
2066 case 4:
2067 value = target_buffer_get_u32(target, buffer);
2068 xscale_send_u32(target, value);
2069 buffer += 4;
2070 break;
2071 case 2:
2072 value = target_buffer_get_u16(target, buffer);
2073 xscale_send_u32(target, value);
2074 buffer += 2;
2075 break;
2076 case 1:
2077 value = *buffer;
2078 xscale_send_u32(target, value);
2079 buffer += 1;
2080 break;
2081 default:
2082 LOG_ERROR("should never get here");
2083 exit(-1);
2084 }
2085 }
2086 #endif
2087 if ((retval=xscale_send(target, buffer, count, size))!=ERROR_OK)
2088 return retval;
2089
2090 /* examine DCSR, to see if Sticky Abort (SA) got set */
2091 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
2092 return retval;
2093 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2094 {
2095 /* clear SA bit */
2096 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
2097 return retval;
2098
2099 return ERROR_TARGET_DATA_ABORT;
2100 }
2101
2102 return ERROR_OK;
2103 }
2104
2105 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer)
2106 {
2107 return xscale_write_memory(target, address, 4, count, buffer);
2108 }
2109
2110 int xscale_checksum_memory(struct target_s *target, u32 address, u32 count, u32* checksum)
2111 {
2112 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2113 }
2114
2115 u32 xscale_get_ttb(target_t *target)
2116 {
2117 armv4_5_common_t *armv4_5 = target->arch_info;
2118 xscale_common_t *xscale = armv4_5->arch_info;
2119 u32 ttb;
2120
2121 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2122 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2123
2124 return ttb;
2125 }
2126
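/* MMU/cache control also goes through the debug handler: 0x50 cleans the
 * DCache using the configured cache_clean_address, 0x51 invalidates the
 * DCache, 0x52 invalidates the ICache and 0x53 issues a CPWAIT; the cp15
 * control register (XSCALE_CTRL) is then rewritten with the new bits.
 */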
2127 void xscale_disable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2128 {
2129 armv4_5_common_t *armv4_5 = target->arch_info;
2130 xscale_common_t *xscale = armv4_5->arch_info;
2131 u32 cp15_control;
2132
2133 /* read cp15 control register */
2134 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2135 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2136
2137 if (mmu)
2138 cp15_control &= ~0x1U;
2139
2140 if (d_u_cache)
2141 {
2142 /* clean DCache */
2143 xscale_send_u32(target, 0x50);
2144 xscale_send_u32(target, xscale->cache_clean_address);
2145
2146 /* invalidate DCache */
2147 xscale_send_u32(target, 0x51);
2148
2149 cp15_control &= ~0x4U;
2150 }
2151
2152 if (i_cache)
2153 {
2154 /* invalidate ICache */
2155 xscale_send_u32(target, 0x52);
2156 cp15_control &= ~0x1000U;
2157 }
2158
2159 /* write new cp15 control register */
2160 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2161
2162 /* execute cpwait to ensure outstanding operations complete */
2163 xscale_send_u32(target, 0x53);
2164 }
2165
2166 void xscale_enable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2167 {
2168 armv4_5_common_t *armv4_5 = target->arch_info;
2169 xscale_common_t *xscale = armv4_5->arch_info;
2170 u32 cp15_control;
2171
2172 /* read cp15 control register */
2173 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2174 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2175
2176 if (mmu)
2177 cp15_control |= 0x1U;
2178
2179 if (d_u_cache)
2180 cp15_control |= 0x4U;
2181
2182 if (i_cache)
2183 cp15_control |= 0x1000U;
2184
2185 /* write new cp15 control register */
2186 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2187
2188 /* execute cpwait to ensure outstanding operations complete */
2189 xscale_send_u32(target, 0x53);
2190 }
2191
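/* XScale provides two hardware instruction breakpoint registers (IBCR0/IBCR1);
 * the low bit of the written value enables the comparator. Software
 * breakpoints save the original opcode and patch in an ARM or Thumb BKPT
 * instruction instead.
 */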
2192 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2193 {
2194 armv4_5_common_t *armv4_5 = target->arch_info;
2195 xscale_common_t *xscale = armv4_5->arch_info;
2196
2197 if (target->state != TARGET_HALTED)
2198 {
2199 LOG_WARNING("target not halted");
2200 return ERROR_TARGET_NOT_HALTED;
2201 }
2202
2203 if (xscale->force_hw_bkpts)
2204 breakpoint->type = BKPT_HARD;
2205
2206 if (breakpoint->set)
2207 {
2208 LOG_WARNING("breakpoint already set");
2209 return ERROR_OK;
2210 }
2211
2212 if (breakpoint->type == BKPT_HARD)
2213 {
2214 u32 value = breakpoint->address | 1;
2215 if (!xscale->ibcr0_used)
2216 {
2217 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2218 xscale->ibcr0_used = 1;
2219 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2220 }
2221 else if (!xscale->ibcr1_used)
2222 {
2223 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2224 xscale->ibcr1_used = 1;
2225 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2226 }
2227 else
2228 {
2229 LOG_ERROR("BUG: no hardware comparator available");
2230 return ERROR_OK;
2231 }
2232 }
2233 else if (breakpoint->type == BKPT_SOFT)
2234 {
2235 if (breakpoint->length == 4)
2236 {
2237 /* keep the original instruction in target endianness */
2238 target->type->read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2239 /* replace it with the ARM breakpoint instruction (xscale->arm_bkpt is host endian) */
2240 target_write_u32(target, breakpoint->address, xscale->arm_bkpt);
2241 }
2242 else
2243 {
2244 /* keep the original instruction in target endianness */
2245 target->type->read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2246 /* replace it with the Thumb breakpoint instruction (xscale->thumb_bkpt is host endian) */
2247 target_write_u32(target, breakpoint->address, xscale->thumb_bkpt);
2248 }
2249 breakpoint->set = 1;
2250 }
2251
2252 return ERROR_OK;
2253
2254 }
2255
2256 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2257 {
2258 armv4_5_common_t *armv4_5 = target->arch_info;
2259 xscale_common_t *xscale = armv4_5->arch_info;
2260
2261 if (target->state != TARGET_HALTED)
2262 {
2263 LOG_WARNING("target not halted");
2264 return ERROR_TARGET_NOT_HALTED;
2265 }
2266
2267 if (xscale->force_hw_bkpts)
2268 {
2269 LOG_DEBUG("forcing use of hardware breakpoint at address 0x%8.8x", breakpoint->address);
2270 breakpoint->type = BKPT_HARD;
2271 }
2272
2273 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2274 {
2275 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2276 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2277 }
2278 else
2279 {
2280 xscale->ibcr_available--;
2281 }
2282
2283 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2284 {
2285 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2286 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2287 }
2288
2289 return ERROR_OK;
2290 }
2291
2292 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2293 {
2294 armv4_5_common_t *armv4_5 = target->arch_info;
2295 xscale_common_t *xscale = armv4_5->arch_info;
2296
2297 if (target->state != TARGET_HALTED)
2298 {
2299 LOG_WARNING("target not halted");
2300 return ERROR_TARGET_NOT_HALTED;
2301 }
2302
2303 if (!breakpoint->set)
2304 {
2305 LOG_WARNING("breakpoint not set");
2306 return ERROR_OK;
2307 }
2308
2309 if (breakpoint->type == BKPT_HARD)
2310 {
2311 if (breakpoint->set == 1)
2312 {
2313 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2314 xscale->ibcr0_used = 0;
2315 }
2316 else if (breakpoint->set == 2)
2317 {
2318 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2319 xscale->ibcr1_used = 0;
2320 }
2321 breakpoint->set = 0;
2322 }
2323 else
2324 {
2325 /* restore original instruction (kept in target endianness) */
2326 if (breakpoint->length == 4)
2327 {
2328 target->type->write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2329 }
2330 else
2331 {
2332 target->type->write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2333 }
2334 breakpoint->set = 0;
2335 }
2336
2337 return ERROR_OK;
2338 }
2339
2340 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2341 {
2342 armv4_5_common_t *armv4_5 = target->arch_info;
2343 xscale_common_t *xscale = armv4_5->arch_info;
2344
2345 if (target->state != TARGET_HALTED)
2346 {
2347 LOG_WARNING("target not halted");
2348 return ERROR_TARGET_NOT_HALTED;
2349 }
2350
2351 if (breakpoint->set)
2352 {
2353 xscale_unset_breakpoint(target, breakpoint);
2354 }
2355
2356 if (breakpoint->type == BKPT_HARD)
2357 xscale->ibcr_available++;
2358
2359 return ERROR_OK;
2360 }
2361
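/* Data watchpoints use the two data breakpoint registers DBR0/DBR1, enabled
 * and qualified (read/write/access) through a two-bit field per register in
 * DBCON: bits [1:0] control DBR0, bits [3:2] control DBR1.
 */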
2362 int xscale_set_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2363 {
2364 armv4_5_common_t *armv4_5 = target->arch_info;
2365 xscale_common_t *xscale = armv4_5->arch_info;
2366 u8 enable=0;
2367 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2368 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2369
2370 if (target->state != TARGET_HALTED)
2371 {
2372 LOG_WARNING("target not halted");
2373 return ERROR_TARGET_NOT_HALTED;
2374 }
2375
2376 xscale_get_reg(dbcon);
2377
2378 switch (watchpoint->rw)
2379 {
2380 case WPT_READ:
2381 enable = 0x3;
2382 break;
2383 case WPT_ACCESS:
2384 enable = 0x2;
2385 break;
2386 case WPT_WRITE:
2387 enable = 0x1;
2388 break;
2389 default:
2390 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2391 }
2392
2393 if (!xscale->dbr0_used)
2394 {
2395 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2396 dbcon_value |= enable;
2397 xscale_set_reg_u32(dbcon, dbcon_value);
2398 watchpoint->set = 1;
2399 xscale->dbr0_used = 1;
2400 }
2401 else if (!xscale->dbr1_used)
2402 {
2403 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2404 dbcon_value |= enable << 2;
2405 xscale_set_reg_u32(dbcon, dbcon_value);
2406 watchpoint->set = 2;
2407 xscale->dbr1_used = 1;
2408 }
2409 else
2410 {
2411 LOG_ERROR("BUG: no hardware comparator available");
2412 return ERROR_OK;
2413 }
2414
2415 return ERROR_OK;
2416 }
2417
2418 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2419 {
2420 armv4_5_common_t *armv4_5 = target->arch_info;
2421 xscale_common_t *xscale = armv4_5->arch_info;
2422
2423 if (target->state != TARGET_HALTED)
2424 {
2425 LOG_WARNING("target not halted");
2426 return ERROR_TARGET_NOT_HALTED;
2427 }
2428
2429 if (xscale->dbr_available < 1)
2430 {
2431 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2432 }
2433
2434 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2435 {
2436 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2437 }
2438
2439 xscale->dbr_available--;
2440
2441 return ERROR_OK;
2442 }
2443
2444 int xscale_unset_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2445 {
2446 armv4_5_common_t *armv4_5 = target->arch_info;
2447 xscale_common_t *xscale = armv4_5->arch_info;
2448 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2449 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2450
2451 if (target->state != TARGET_HALTED)
2452 {
2453 LOG_WARNING("target not halted");
2454 return ERROR_TARGET_NOT_HALTED;
2455 }
2456
2457 if (!watchpoint->set)
2458 {
2459 LOG_WARNING("breakpoint not set");
2460 return ERROR_OK;
2461 }
2462
2463 if (watchpoint->set == 1)
2464 {
2465 dbcon_value &= ~0x3;
2466 xscale_set_reg_u32(dbcon, dbcon_value);
2467 xscale->dbr0_used = 0;
2468 }
2469 else if (watchpoint->set == 2)
2470 {
2471 dbcon_value &= ~0xc;
2472 xscale_set_reg_u32(dbcon, dbcon_value);
2473 xscale->dbr1_used = 0;
2474 }
2475 watchpoint->set = 0;
2476
2477 return ERROR_OK;
2478 }
2479
2480 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2481 {
2482 armv4_5_common_t *armv4_5 = target->arch_info;
2483 xscale_common_t *xscale = armv4_5->arch_info;
2484
2485 if (target->state != TARGET_HALTED)
2486 {
2487 LOG_WARNING("target not halted");
2488 return ERROR_TARGET_NOT_HALTED;
2489 }
2490
2491 if (watchpoint->set)
2492 {
2493 xscale_unset_watchpoint(target, watchpoint);
2494 }
2495
2496 xscale->dbr_available++;
2497
2498 return ERROR_OK;
2499 }
2500
2501 void xscale_enable_watchpoints(struct target_s *target)
2502 {
2503 watchpoint_t *watchpoint = target->watchpoints;
2504
2505 while (watchpoint)
2506 {
2507 if (watchpoint->set == 0)
2508 xscale_set_watchpoint(target, watchpoint);
2509 watchpoint = watchpoint->next;
2510 }
2511 }
2512
2513 void xscale_enable_breakpoints(struct target_s *target)
2514 {
2515 breakpoint_t *breakpoint = target->breakpoints;
2516
2517 /* set any pending breakpoints */
2518 while (breakpoint)
2519 {
2520 if (breakpoint->set == 0)
2521 xscale_set_breakpoint(target, breakpoint);
2522 breakpoint = breakpoint->next;
2523 }
2524 }
2525
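/* Register access is split in two paths: DCSR, TX and RX are reachable
 * directly through JTAG, while the remaining debug registers are proxied by
 * the debug handler using command 0x40 (read) or 0x41 (write) followed by the
 * handler's register number.
 */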
2526 int xscale_get_reg(reg_t *reg)
2527 {
2528 xscale_reg_t *arch_info = reg->arch_info;
2529 target_t *target = arch_info->target;
2530 armv4_5_common_t *armv4_5 = target->arch_info;
2531 xscale_common_t *xscale = armv4_5->arch_info;
2532
2533 /* DCSR, TX and RX are accessible via JTAG */
2534 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2535 {
2536 return xscale_read_dcsr(arch_info->target);
2537 }
2538 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2539 {
2540 /* 1 = consume register content */
2541 return xscale_read_tx(arch_info->target, 1);
2542 }
2543 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2544 {
2545 /* can't read from RX register (host -> debug handler) */
2546 return ERROR_OK;
2547 }
2548 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2549 {
2550 /* can't (explicitly) read from TXRXCTRL register */
2551 return ERROR_OK;
2552 }
2553 else /* Other DBG registers have to be transferred by the debug handler */
2554 {
2555 /* send CP read request (command 0x40) */
2556 xscale_send_u32(target, 0x40);
2557
2558 /* send CP register number */
2559 xscale_send_u32(target, arch_info->dbg_handler_number);
2560
2561 /* read register value */
2562 xscale_read_tx(target, 1);
2563 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2564
2565 reg->dirty = 0;
2566 reg->valid = 1;
2567 }
2568
2569 return ERROR_OK;
2570 }
2571
2572 int xscale_set_reg(reg_t *reg, u8* buf)
2573 {
2574 xscale_reg_t *arch_info = reg->arch_info;
2575 target_t *target = arch_info->target;
2576 armv4_5_common_t *armv4_5 = target->arch_info;
2577 xscale_common_t *xscale = armv4_5->arch_info;
2578 u32 value = buf_get_u32(buf, 0, 32);
2579
2580 /* DCSR, TX and RX are accessible via JTAG */
2581 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2582 {
2583 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2584 return xscale_write_dcsr(arch_info->target, -1, -1);
2585 }
2586 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2587 {
2588 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2589 return xscale_write_rx(arch_info->target);
2590 }
2591 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2592 {
2593 /* can't write to TX register (debug-handler -> host) */
2594 return ERROR_OK;
2595 }
2596 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2597 {
2598 /* can't (explicitly) write to TXRXCTRL register */
2599 return ERROR_OK;
2600 }
2601 else /* Other DBG registers have to be transferred by the debug handler */
2602 {
2603 /* send CP write request (command 0x41) */
2604 xscale_send_u32(target, 0x41);
2605
2606 /* send CP register number */
2607 xscale_send_u32(target, arch_info->dbg_handler_number);
2608
2609 /* send CP register value */
2610 xscale_send_u32(target, value);
2611 buf_set_u32(reg->value, 0, 32, value);
2612 }
2613
2614 return ERROR_OK;
2615 }
2616
2617 /* convenience wrapper to access XScale specific registers */
2618 int xscale_set_reg_u32(reg_t *reg, u32 value)
2619 {
2620 u8 buf[4];
2621
2622 buf_set_u32(buf, 0, 32, value);
2623
2624 return xscale_set_reg(reg, buf);
2625 }
2626
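/* Unlike xscale_write_dcsr(), which accesses the DCSR through its JTAG data
 * register, this variant asks the running debug handler to write the DCSR via
 * the coprocessor write command (0x41).
 */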
2627 int xscale_write_dcsr_sw(target_t *target, u32 value)
2628 {
2629 /* get pointers to arch-specific information */
2630 armv4_5_common_t *armv4_5 = target->arch_info;
2631 xscale_common_t *xscale = armv4_5->arch_info;
2632 reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2633 xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
2634
2635 /* send CP write request (command 0x41) */
2636 xscale_send_u32(target, 0x41);
2637
2638 /* send CP register number */
2639 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2640
2641 /* send CP register value */
2642 xscale_send_u32(target, value);
2643 buf_set_u32(dcsr->value, 0, 32, value);
2644
2645 return ERROR_OK;
2646 }
2647
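/* The trace buffer is drained with debug handler command 0x61 and arrives as
 * 258 words: 256 trace entries followed by the two checkpoint registers.
 * Entries are scanned backwards because an indirect branch message (0x9x/0xdx)
 * is preceded by four entries holding the bytes of the branch target address.
 */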
2648 int xscale_read_trace(target_t *target)
2649 {
2650 /* get pointers to arch-specific information */
2651 armv4_5_common_t *armv4_5 = target->arch_info;
2652 xscale_common_t *xscale = armv4_5->arch_info;
2653 xscale_trace_data_t **trace_data_p;
2654
2655 /* 258 words from debug handler
2656 * 256 trace buffer entries
2657 * 2 checkpoint addresses
2658 */
2659 u32 trace_buffer[258];
2660 int is_address[256];
2661 int i, j;
2662
2663 if (target->state != TARGET_HALTED)
2664 {
2665 LOG_WARNING("target must be stopped to read trace data");
2666 return ERROR_TARGET_NOT_HALTED;
2667 }
2668
2669 /* send read trace buffer command (command 0x61) */
2670 xscale_send_u32(target, 0x61);
2671
2672 /* receive trace buffer content */
2673 xscale_receive(target, trace_buffer, 258);
2674
2675 /* parse buffer backwards to identify address entries */
2676 for (i = 255; i >= 0; i--)
2677 {
2678 is_address[i] = 0;
2679 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2680 ((trace_buffer[i] & 0xf0) == 0xd0))
2681 {
2682 if (i >= 3)
2683 is_address[--i] = 1;
2684 if (i >= 2)
2685 is_address[--i] = 1;
2686 if (i >= 1)
2687 is_address[--i] = 1;
2688 if (i >= 1)
2689 is_address[--i] = 1;
2690 }
2691 }
2692
2693
2694 /* search first non-zero entry */
2695 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2696 ;
2697
2698 if (j == 256)
2699 {
2700 LOG_DEBUG("no trace data collected");
2701 return ERROR_XSCALE_NO_TRACE_DATA;
2702 }
2703
2704 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2705 ;
2706
2707 *trace_data_p = malloc(sizeof(xscale_trace_data_t));
2708 (*trace_data_p)->next = NULL;
2709 (*trace_data_p)->chkpt0 = trace_buffer[256];
2710 (*trace_data_p)->chkpt1 = trace_buffer[257];
2711 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2712 (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
2713 (*trace_data_p)->depth = 256 - j;
2714
2715 for (i = j; i < 256; i++)
2716 {
2717 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2718 if (is_address[i])
2719 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2720 else
2721 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2722 }
2723
2724 return ERROR_OK;
2725 }
2726
2727 int xscale_read_instruction(target_t *target, arm_instruction_t *instruction)
2728 {
2729 /* get pointers to arch-specific information */
2730 armv4_5_common_t *armv4_5 = target->arch_info;
2731 xscale_common_t *xscale = armv4_5->arch_info;
2732 int i;
2733 int section = -1;
2734 u32 size_read;
2735 u32 opcode;
2736 int retval;
2737
2738 if (!xscale->trace.image)
2739 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2740
2741 /* search for the section the current instruction belongs to */
2742 for (i = 0; i < xscale->trace.image->num_sections; i++)
2743 {
2744 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2745 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2746 {
2747 section = i;
2748 break;
2749 }
2750 }
2751
2752 if (section == -1)
2753 {
2754 /* current instruction couldn't be found in the image */
2755 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2756 }
2757
2758 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2759 {
2760 u8 buf[4];
2761 if ((retval = image_read_section(xscale->trace.image, section,
2762 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2763 4, buf, &size_read)) != ERROR_OK)
2764 {
2765 LOG_ERROR("error while reading instruction: %i", retval);
2766 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2767 }
2768 opcode = target_buffer_get_u32(target, buf);
2769 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2770 }
2771 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2772 {
2773 u8 buf[2];
2774 if ((retval = image_read_section(xscale->trace.image, section,
2775 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2776 2, buf, &size_read)) != ERROR_OK)
2777 {
2778 LOG_ERROR("error while reading instruction: %i", retval);
2779 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2780 }
2781 opcode = target_buffer_get_u16(target, buf);
2782 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2783 }
2784 else
2785 {
2786 LOG_ERROR("BUG: unknown core state encountered");
2787 exit(-1);
2788 }
2789
2790 return ERROR_OK;
2791 }
2792
2793 int xscale_branch_address(xscale_trace_data_t *trace_data, int i, u32 *target)
2794 {
2795 /* if there are less than four entries prior to the indirect branch message
2796 * we can't extract the address */
2797 if (i < 4)
2798 {
2799 return -1;
2800 }
2801
2802 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2803 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2804
2805 return 0;
2806 }
2807
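/* Each trace message encodes its type in the high nibble and the number of
 * instructions executed since the previous message in the low nibble; a
 * roll-over message (0xf) adds another 16 to that count. The analysis below
 * walks the messages, disassembling instructions from the loaded image to
 * reconstruct the executed program flow.
 */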
2808 int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
2809 {
2810 /* get pointers to arch-specific information */
2811 armv4_5_common_t *armv4_5 = target->arch_info;
2812 xscale_common_t *xscale = armv4_5->arch_info;
2813 int next_pc_ok = 0;
2814 u32 next_pc = 0x0;
2815 xscale_trace_data_t *trace_data = xscale->trace.data;
2816 int retval;
2817
2818 while (trace_data)
2819 {
2820 int i, chkpt;
2821 int rollover;
2822 int branch;
2823 int exception;
2824 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2825
2826 chkpt = 0;
2827 rollover = 0;
2828
2829 for (i = 0; i < trace_data->depth; i++)
2830 {
2831 next_pc_ok = 0;
2832 branch = 0;
2833 exception = 0;
2834
2835 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2836 continue;
2837
2838 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2839 {
2840 case 0: /* Exceptions */
2841 case 1:
2842 case 2:
2843 case 3:
2844 case 4:
2845 case 5:
2846 case 6:
2847 case 7:
2848 exception = (trace_data->entries[i].data & 0x70) >> 4;
2849 next_pc_ok = 1;
2850 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2851 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2852 break;
2853 case 8: /* Direct Branch */
2854 branch = 1;
2855 break;
2856 case 9: /* Indirect Branch */
2857 branch = 1;
2858 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2859 {
2860 next_pc_ok = 1;
2861 }
2862 break;
2863 case 13: /* Checkpointed Indirect Branch */
2864 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2865 {
2866 next_pc_ok = 1;
2867 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2868 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2869 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2870 }
2871 /* explicit fall-through */
2872 case 12: /* Checkpointed Direct Branch */
2873 branch = 1;
2874 if (chkpt == 0)
2875 {
2876 next_pc_ok = 1;
2877 next_pc = trace_data->chkpt0;
2878 chkpt++;
2879 }
2880 else if (chkpt == 1)
2881 {
2882 next_pc_ok = 1;
2883 next_pc = trace_data->chkpt1;
2884 chkpt++;
2885 }
2886 else
2887 {
2888 LOG_WARNING("more than two checkpointed branches encountered");
2889 }
2890 break;
2891 case 15: /* Roll-over */
2892 rollover++;
2893 continue;
2894 default: /* Reserved */
2895 command_print(cmd_ctx, "--- reserved trace message ---");
2896 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2897 return ERROR_OK;
2898 }
2899
2900 if (xscale->trace.pc_ok)
2901 {
2902 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2903 arm_instruction_t instruction;
2904
2905 if ((exception == 6) || (exception == 7))
2906 {
2907 /* IRQ or FIQ exception, no instruction executed */
2908 executed -= 1;
2909 }
2910
2911 while (executed-- >= 0)
2912 {
2913 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2914 {
2915 /* can't continue tracing with no image available */
2916 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2917 {
2918 return retval;
2919 }
2920 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2921 {
2922 /* TODO: handle incomplete images */
2923 }
2924 }
2925
2926 /* a precise abort on a load to the PC is included in the incremental
2927 * word count, other instructions causing data aborts are not included
2928 */
2929 if ((executed == 0) && (exception == 4)
2930 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2931 {
2932 if ((instruction.type == ARM_LDM)
2933 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2934 {
2935 executed--;
2936 }
2937 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2938 && (instruction.info.load_store.Rd != 15))
2939 {
2940 executed--;
2941 }
2942 }
2943
2944 /* only the last instruction executed
2945 * (the one that caused the control flow change)
2946 * could be a taken branch
2947 */
2948 if (((executed == -1) && (branch == 1)) &&
2949 (((instruction.type == ARM_B) ||
2950 (instruction.type == ARM_BL) ||
2951 (instruction.type == ARM_BLX)) &&
2952 (instruction.info.b_bl_bx_blx.target_address != -1)))
2953 {
2954 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2955 }
2956 else
2957 {
2958 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2959 }
2960 command_print(cmd_ctx, "%s", instruction.text);
2961 }
2962
2963 rollover = 0;
2964 }
2965
2966 if (next_pc_ok)
2967 {
2968 xscale->trace.current_pc = next_pc;
2969 xscale->trace.pc_ok = 1;
2970 }
2971 }
2972
2973 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2974 {
2975 arm_instruction_t instruction;
2976 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2977 {
2978 /* can't continue tracing with no image available */
2979 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2980 {
2981 return retval;
2982 }
2983 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2984 {
2985 /* TODO: handle incomplete images */
2986 }
2987 }
2988 command_print(cmd_ctx, "%s", instruction.text);
2989 }
2990
2991 trace_data = trace_data->next;
2992 }
2993
2994 return ERROR_OK;
2995 }
2996
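/* The target ends up with two register caches: the generic ARMv4/5 core
 * register cache built by armv4_5_build_reg_cache(), and a second cache
 * holding the XScale-specific debug and cp15 registers listed in
 * xscale_reg_arch_info, each tied back to this target instance.
 */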
2997 void xscale_build_reg_cache(target_t *target)
2998 {
2999 /* get pointers to arch-specific information */
3000 armv4_5_common_t *armv4_5 = target->arch_info;
3001 xscale_common_t *xscale = armv4_5->arch_info;
3002
3003 reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
3004 xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
3005 int i;
3006 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);
3007
3008 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
3009 armv4_5->core_cache = (*cache_p);
3010
3011 /* register a register arch-type for XScale dbg registers only once */
3012 if (xscale_reg_arch_type == -1)
3013 xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
3014
3015 (*cache_p)->next = malloc(sizeof(reg_cache_t));
3016 cache_p = &(*cache_p)->next;
3017
3018 /* fill in values for the xscale reg cache */
3019 (*cache_p)->name = "XScale registers";
3020 (*cache_p)->next = NULL;
3021 (*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
3022 (*cache_p)->num_regs = num_regs;
3023
3024 for (i = 0; i < num_regs; i++)
3025 {
3026 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
3027 (*cache_p)->reg_list[i].value = calloc(4, 1);
3028 (*cache_p)->reg_list[i].dirty = 0;
3029 (*cache_p)->reg_list[i].valid = 0;
3030 (*cache_p)->reg_list[i].size = 32;
3031 (*cache_p)->reg_list[i].bitfield_desc = NULL;
3032 (*cache_p)->reg_list[i].num_bitfields = 0;
3033 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
3034 (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
3035 arch_info[i] = xscale_reg_arch_info[i];
3036 arch_info[i].target = target;
3037 }
3038
3039 xscale->reg_cache = (*cache_p);
3040 }
3041
3042 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target)
3043 {
3044 return ERROR_OK;
3045 }
3046
3047 int xscale_quit()
3048 {
3049
3050 return ERROR_OK;
3051 }
3052
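/* One-time per-target setup: record the JTAG instruction codes and IR length
 * for the given variant, default the debug handler address to 0xfe000800, set
 * up reset vectors that branch into the handler (the remaining vectors branch
 * to themselves), and hook the XScale implementations into the generic
 * ARMv4/5 and MMU glue structures.
 */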
3053 int xscale_init_arch_info(target_t *target, xscale_common_t *xscale, int chain_pos, char *variant)
3054 {
3055 armv4_5_common_t *armv4_5;
3056 u32 high_reset_branch, low_reset_branch;
3057 int i;
3058
3059 armv4_5 = &xscale->armv4_5_common;
3060
3061 /* store architecture specific data (none so far) */
3062 xscale->arch_info = NULL;
3063 xscale->common_magic = XSCALE_COMMON_MAGIC;
3064
3065 /* remember the variant (PXA25x, PXA27x, IXP42x, ...) */
3066 xscale->variant = strdup(variant);
3067
3068 /* prepare JTAG information for the new target */
3069 xscale->jtag_info.chain_pos = chain_pos;
3070
3071 xscale->jtag_info.dbgrx = 0x02;
3072 xscale->jtag_info.dbgtx = 0x10;
3073 xscale->jtag_info.dcsr = 0x09;
3074 xscale->jtag_info.ldic = 0x07;
3075
3076 if ((strcmp(xscale->variant, "pxa250") == 0) ||
3077 (strcmp(xscale->variant, "pxa255") == 0) ||
3078 (strcmp(xscale->variant, "pxa26x") == 0))
3079 {
3080 xscale->jtag_info.ir_length = 5;
3081 }
3082 else if ((strcmp(xscale->variant, "pxa27x") == 0) ||
3083 (strcmp(xscale->variant, "ixp42x") == 0) ||
3084 (strcmp(xscale->variant, "ixp45x") == 0) ||
3085 (strcmp(xscale->variant, "ixp46x") == 0))
3086 {
3087 xscale->jtag_info.ir_length = 7;
3088 }
3089
3090 /* the debug handler isn't installed (and thus not running) at this time */
3091 xscale->handler_installed = 0;
3092 xscale->handler_running = 0;
3093 xscale->handler_address = 0xfe000800;
3094
3095 /* clear the vectors we keep locally for reference */
3096 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3097 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3098
3099 /* no user-specified vectors have been configured yet */
3100 xscale->static_low_vectors_set = 0x0;
3101 xscale->static_high_vectors_set = 0x0;
3102
3103 /* calculate branches to debug handler */
3104 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3105 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
3106
3107 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3108 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3109
3110 for (i = 1; i <= 7; i++)
3111 {
3112 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3113 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3114 }
3115
3116 /* 64kB aligned region used for DCache cleaning */
3117 xscale->cache_clean_address = 0xfffe0000;
3118
3119 xscale->hold_rst = 0;
3120 xscale->external_debug_break = 0;
3121
3122 xscale->force_hw_bkpts = 1;
3123
3124 xscale->ibcr_available = 2;
3125 xscale->ibcr0_used = 0;
3126 xscale->ibcr1_used = 0;
3127
3128 xscale->dbr_available = 2;
3129 xscale->dbr0_used = 0;
3130 xscale->dbr1_used = 0;
3131
3132 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3133 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3134
3135 xscale->vector_catch = 0x1;
3136
3137 xscale->trace.capture_status = TRACE_IDLE;
3138 xscale->trace.data = NULL;
3139 xscale->trace.image = NULL;
3140 xscale->trace.buffer_enabled = 0;
3141 xscale->trace.buffer_fill = 0;
3142
3143 /* prepare ARMv4/5 specific information */
3144 armv4_5->arch_info = xscale;
3145 armv4_5->read_core_reg = xscale_read_core_reg;
3146 armv4_5->write_core_reg = xscale_write_core_reg;
3147 armv4_5->full_context = xscale_full_context;
3148
3149 armv4_5_init_arch_info(target, armv4_5);
3150
3151 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3152 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3153 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3154 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3155 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3156 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3157 xscale->armv4_5_mmu.has_tiny_pages = 1;
3158 xscale->armv4_5_mmu.mmu_enabled = 0;
3159
3160 xscale->fast_memory_access = 0;
3161
3162 return ERROR_OK;
3163 }
3164
3165 /* target xscale <endianness> <startup_mode> <chain_pos> <variant> */
3166 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target)
3167 {
3168 int chain_pos;
3169 char *variant = NULL;
3170 xscale_common_t *xscale = malloc(sizeof(xscale_common_t));
3171 memset(xscale, 0, sizeof(*xscale));
3172
3173 if (argc < 5)
3174 {
3175 LOG_ERROR("'target xscale' requires four arguments: <endianess> <startup_mode> <chain_pos> <variant>");
3176 return ERROR_OK;
3177 }
3178
3179 chain_pos = strtoul(args[3], NULL, 0);
3180
3181 variant = args[4];
3182
3183 xscale_init_arch_info(target, xscale, chain_pos, variant);
3184 xscale_build_reg_cache(target);
3185
3186 return ERROR_OK;
3187 }
3188
3189 int xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3190 {
3191 target_t *target = NULL;
3192 armv4_5_common_t *armv4_5;
3193 xscale_common_t *xscale;
3194
3195 u32 handler_address;
3196
3197 if (argc < 2)
3198 {
3199 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3200 return ERROR_OK;
3201 }
3202
3203 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3204 {
3205 LOG_ERROR("no target '%s' configured", args[0]);
3206 return ERROR_OK;
3207 }
3208
3209 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3210 {
3211 return ERROR_OK;
3212 }
3213
3214 handler_address = strtoul(args[1], NULL, 0);
3215
3216 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3217 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3218 {
3219 xscale->handler_address = handler_address;
3220 }
3221 else
3222 {
3223 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3224 }
3225
3226 return ERROR_OK;
3227 }
3228
3229 int xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3230 {
3231 target_t *target = NULL;
3232 armv4_5_common_t *armv4_5;
3233 xscale_common_t *xscale;
3234
3235 u32 cache_clean_address;
3236
3237 if (argc < 2)
3238 {
3239 LOG_ERROR("'xscale cache_clean_address <target#> <address>' command takes two required operands");
3240 return ERROR_OK;
3241 }
3242
3243 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3244 {
3245 LOG_ERROR("no target '%s' configured", args[0]);
3246 return ERROR_OK;
3247 }
3248
3249 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3250 {
3251 return ERROR_OK;
3252 }
3253
3254 cache_clean_address = strtoul(args[1], NULL, 0);
3255
3256 if (cache_clean_address & 0xffff)
3257 {
3258 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3259 }
3260 else
3261 {
3262 xscale->cache_clean_address = cache_clean_address;
3263 }
3264
3265 return ERROR_OK;
3266 }
3267
3268 int xscale_handle_cache_info_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3269 {
3270 target_t *target = get_current_target(cmd_ctx);
3271 armv4_5_common_t *armv4_5;
3272 xscale_common_t *xscale;
3273
3274 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3275 {
3276 return ERROR_OK;
3277 }
3278
3279 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
3280 }
3281
3282 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical)
3283 {
3284 armv4_5_common_t *armv4_5;
3285 xscale_common_t *xscale;
3286 int retval;
3287 int type;
3288 u32 cb;
3289 int domain;
3290 u32 ap;
3291
3292
3293 if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)
3294 {
3295 return retval;
3296 }
3297 u32 ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3298 if (type == -1)
3299 {
3300 return ret;
3301 }
3302 *physical = ret;
3303 return ERROR_OK;
3304 }
3305
3306 static int xscale_mmu(struct target_s *target, int *enabled)
3307 {
3308 armv4_5_common_t *armv4_5 = target->arch_info;
3309 xscale_common_t *xscale = armv4_5->arch_info;
3310
3311 if (target->state != TARGET_HALTED)
3312 {
3313 LOG_ERROR("Target not halted");
3314 return ERROR_TARGET_INVALID;
3315 }
3316 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3317 return ERROR_OK;
3318 }
3319
3320
3321 int xscale_handle_mmu_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3322 {
3323 target_t *target = get_current_target(cmd_ctx);
3324 armv4_5_common_t *armv4_5;
3325 xscale_common_t *xscale;
3326
3327 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3328 {
3329 return ERROR_OK;
3330 }
3331
3332 if (target->state != TARGET_HALTED)
3333 {
3334 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3335 return ERROR_OK;
3336 }
3337
3338 if (argc >= 1)
3339 {
3340 if (strcmp("enable", args[0]) == 0)
3341 {
3342 xscale_enable_mmu_caches(target, 1, 0, 0);
3343 xscale->armv4_5_mmu.mmu_enabled = 1;
3344 }
3345 else if (strcmp("disable", args[0]) == 0)
3346 {
3347 xscale_disable_mmu_caches(target, 1, 0, 0);
3348 xscale->armv4_5_mmu.mmu_enabled = 0;
3349 }
3350 }
3351
3352 command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3353
3354 return ERROR_OK;
3355 }
3356
3357 int xscale_handle_idcache_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3358 {
3359 target_t *target = get_current_target(cmd_ctx);
3360 armv4_5_common_t *armv4_5;
3361 xscale_common_t *xscale;
3362 int icache = 0, dcache = 0;
3363
3364 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3365 {
3366 return ERROR_OK;
3367 }
3368
3369 if (target->state != TARGET_HALTED)
3370 {
3371 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3372 return ERROR_OK;
3373 }
3374
3375 if (strcmp(cmd, "icache") == 0)
3376 icache = 1;
3377 else if (strcmp(cmd, "dcache") == 0)
3378 dcache = 1;
3379
3380 if (argc >= 1)
3381 {
3382 if (strcmp("enable", args[0]) == 0)
3383 {
3384 xscale_enable_mmu_caches(target, 0, dcache, icache);
3385
3386 if (icache)
3387 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
3388 else if (dcache)
3389 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
3390 }
3391 else if (strcmp("disable", args[0]) == 0)
3392 {
3393 xscale_disable_mmu_caches(target, 0, dcache, icache);
3394
3395 if (icache)
3396 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
3397 else if (dcache)
3398 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
3399 }
3400 }
3401
3402 if (icache)
3403 command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
3404
3405 if (dcache)
3406 command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
3407
3408 return ERROR_OK;
3409 }
3410
3411 int xscale_handle_vector_catch_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3412 {
3413 target_t *target = get_current_target(cmd_ctx);
3414 armv4_5_common_t *armv4_5;
3415 xscale_common_t *xscale;
3416
3417 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3418 {
3419 return ERROR_OK;
3420 }
3421
3422 if (argc < 1)
3423 {
3424 command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
3425 }
3426 else
3427 {
3428 xscale->vector_catch = strtoul(args[0], NULL, 0);
3429 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3430 xscale_write_dcsr(target, -1, -1);
3431 }
3432
3433 command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3434
3435 return ERROR_OK;
3436 }
3437
3438 int xscale_handle_force_hw_bkpts_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3439 {
3440 target_t *target = get_current_target(cmd_ctx);
3441 armv4_5_common_t *armv4_5;
3442 xscale_common_t *xscale;
3443
3444 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3445 {
3446 return ERROR_OK;
3447 }
3448
3449 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3450 {
3451 xscale->force_hw_bkpts = 1;
3452 }
3453 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3454 {
3455 xscale->force_hw_bkpts = 0;
3456 }
3457 else
3458 {
3459 command_print(cmd_ctx, "usage: xscale force_hw_bkpts <enable|disable>");
3460 }
3461
3462 command_print(cmd_ctx, "force hardware breakpoints %s", (xscale->force_hw_bkpts) ? "enabled" : "disabled");
3463
3464 return ERROR_OK;
3465 }
3466
3467 int xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3468 {
3469 target_t *target = get_current_target(cmd_ctx);
3470 armv4_5_common_t *armv4_5;
3471 xscale_common_t *xscale;
3472 u32 dcsr_value;
3473
3474 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3475 {
3476 return ERROR_OK;
3477 }
3478
3479 if (target->state != TARGET_HALTED)
3480 {
3481 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3482 return ERROR_OK;
3483 }
3484
3485 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3486 {
3487 xscale_trace_data_t *td, *next_td;
3488 xscale->trace.buffer_enabled = 1;
3489
3490 /* free old trace data */
3491 td = xscale->trace.data;
3492 while (td)
3493 {
3494 next_td = td->next;
3495
3496 if (td->entries)
3497 free(td->entries);
3498 free(td);
3499 td = next_td;
3500 }
3501 xscale->trace.data = NULL;
3502 }
3503 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3504 {
3505 xscale->trace.buffer_enabled = 0;
3506 }
3507
3508 if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
3509 {
3510 if (argc >= 3)
3511 xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);
3512 else
3513 xscale->trace.buffer_fill = 1;
3514 }
3515 else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
3516 {
3517 xscale->trace.buffer_fill = -1;
3518 }
3519
3520 if (xscale->trace.buffer_enabled)
3521 {
3522 /* if we enable the trace buffer in fill-once
3523 * mode we know the address of the first instruction */
3524 xscale->trace.pc_ok = 1;
3525 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3526 }
3527 else
3528 {
3529 /* otherwise the address is unknown, and we have no known good PC */
3530 xscale->trace.pc_ok = 0;
3531 }
3532
3533 command_print(cmd_ctx, "trace buffer %s (%s)",
3534 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3535 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3536
3537 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3538 if (xscale->trace.buffer_fill >= 0)
3539 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3540 else
3541 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3542
3543 return ERROR_OK;
3544 }
3545
3546 int xscale_handle_trace_image_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3547 {
3548 target_t *target;
3549 armv4_5_common_t *armv4_5;
3550 xscale_common_t *xscale;
3551
3552 if (argc < 1)
3553 {
3554 command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
3555 return ERROR_OK;
3556 }
3557
3558 target = get_current_target(cmd_ctx);
3559
3560 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3561 {
3562 return ERROR_OK;
3563 }
3564
3565 if (xscale->trace.image)
3566 {
3567 image_close(xscale->trace.image);
3568 free(xscale->trace.image);
3569 command_print(cmd_ctx, "previously loaded image found and closed");
3570 }
3571
3572 xscale->trace.image = malloc(sizeof(image_t));
3573 xscale->trace.image->base_address_set = 0;
3574 xscale->trace.image->start_address_set = 0;
3575
3576 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3577 if (argc >= 2)
3578 {
3579 xscale->trace.image->base_address_set = 1;
3580 xscale->trace.image->base_address = strtoul(args[1], NULL, 0);
3581 }
3582 else
3583 {
3584 xscale->trace.image->base_address_set = 0;
3585 }
3586
3587 if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
3588 {
3589 free(xscale->trace.image);
3590 xscale->trace.image = NULL;
3591 return ERROR_OK;
3592 }
3593
3594 return ERROR_OK;
3595 }
3596
3597 int xscale_handle_dump_trace_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3598 {
3599 target_t *target = get_current_target(cmd_ctx);
3600 armv4_5_common_t *armv4_5;
3601 xscale_common_t *xscale;
3602 xscale_trace_data_t *trace_data;
3603 fileio_t file;
3604
3605 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3606 {
3607 return ERROR_OK;
3608 }
3609
3610 if (target->state != TARGET_HALTED)
3611 {
3612 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3613 return ERROR_OK;
3614 }
3615
3616 if (argc < 1)
3617 {
3618 command_print(cmd_ctx, "usage: xscale dump_trace <file>");
3619 return ERROR_OK;
3620 }
3621
3622 trace_data = xscale->trace.data;
3623
3624 if (!trace_data)
3625 {
3626 command_print(cmd_ctx, "no trace data collected");
3627 return ERROR_OK;
3628 }
3629
3630 if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3631 {
3632 return ERROR_OK;
3633 }
3634
3635 while (trace_data)
3636 {
3637 int i;
3638
3639 fileio_write_u32(&file, trace_data->chkpt0);
3640 fileio_write_u32(&file, trace_data->chkpt1);
3641 fileio_write_u32(&file, trace_data->last_instruction);
3642 fileio_write_u32(&file, trace_data->depth);
3643
3644 for (i = 0; i < trace_data->depth; i++)
3645 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3646
3647 trace_data = trace_data->next;
3648 }
3649
3650 fileio_close(&file);
3651
3652 return ERROR_OK;
3653 }
3654
3655 int xscale_handle_analyze_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3656 {
3657 target_t *target = get_current_target(cmd_ctx);
3658 armv4_5_common_t *armv4_5;
3659 xscale_common_t *xscale;
3660
3661 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3662 {
3663 return ERROR_OK;
3664 }
3665
3666 xscale_analyze_trace(target, cmd_ctx);
3667
3668 return ERROR_OK;
3669 }
3670
3671 int xscale_handle_cp15(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3672 {
3673 target_t *target = get_current_target(cmd_ctx);
3674 armv4_5_common_t *armv4_5;
3675 xscale_common_t *xscale;
3676
3677 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3678 {
3679 return ERROR_OK;
3680 }
3681
3682 if (target->state != TARGET_HALTED)
3683 {
3684 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3685 return ERROR_OK;
3686 }
3687 u32 reg_no = 0;
3688 reg_t *reg = NULL;
3689 if(argc > 0)
3690 {
3691 reg_no = strtoul(args[0], NULL, 0);
3692 /* translate from XScale cp15 register number to OpenOCD register enum */
3693 switch(reg_no)
3694 {
3695 case 0:
3696 reg_no = XSCALE_MAINID;
3697 break;
3698 case 1:
3699 reg_no = XSCALE_CTRL;
3700 break;
3701 case 2:
3702 reg_no = XSCALE_TTB;
3703 break;
3704 case 3:
3705 reg_no = XSCALE_DAC;
3706 break;
3707 case 5:
3708 reg_no = XSCALE_FSR;
3709 break;
3710 case 6:
3711 reg_no = XSCALE_FAR;
3712 break;
3713 case 13:
3714 reg_no = XSCALE_PID;
3715 break;
3716 case 15:
3717 reg_no = XSCALE_CPACCESS;
3718 break;
3719 default:
3720 command_print(cmd_ctx, "invalid register number");
3721 return ERROR_INVALID_ARGUMENTS;
3722 }
3723 reg = &xscale->reg_cache->reg_list[reg_no];
3724
3725 }
3726 if(argc == 1)
3727 {
3728 u32 value;
3729
3730 /* read the selected cp15 register */
3731 xscale_get_reg(reg);
3732 value = buf_get_u32(reg->value, 0, 32);
3733 command_print(cmd_ctx, "%s (/%i): 0x%x", reg->name, reg->size, value);
3734 }
3735 else if(argc == 2)
3736 {
3737
3738 u32 value = strtoul(args[1], NULL, 0);
3739
3740 /* send CP write request (command 0x41) */
3741 xscale_send_u32(target, 0x41);
3742
3743 /* send CP register number */
3744 xscale_send_u32(target, reg_no);
3745
3746 /* send CP register value */
3747 xscale_send_u32(target, value);
3748
3749 /* execute cpwait to ensure outstanding operations complete */
3750 xscale_send_u32(target, 0x53);
3751 }
3752 else
3753 {
3754 command_print(cmd_ctx, "usage: cp15 [register]<, [value]>");
3755 }
3756
3757 return ERROR_OK;
3758 }
3759
3760 int handle_xscale_fast_memory_access_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3761 {
3762 target_t *target = get_current_target(cmd_ctx);
3763 armv4_5_common_t *armv4_5;
3764 xscale_common_t *xscale;
3765
3766 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3767 {
3768 return ERROR_OK;
3769 }
3770
3771 if (argc == 1)
3772 {
3773 if (strcmp("enable", args[0]) == 0)
3774 {
3775 xscale->fast_memory_access = 1;
3776 }
3777 else if (strcmp("disable", args[0]) == 0)
3778 {
3779 xscale->fast_memory_access = 0;
3780 }
3781 else
3782 {
3783 return ERROR_COMMAND_SYNTAX_ERROR;
3784 }
3785 } else if (argc!=0)
3786 {
3787 return ERROR_COMMAND_SYNTAX_ERROR;
3788 }
3789
3790 command_print(cmd_ctx, "fast memory access is %s", (xscale->fast_memory_access) ? "enabled" : "disabled");
3791
3792 return ERROR_OK;
3793 }
3794
3795 int xscale_register_commands(struct command_context_s *cmd_ctx)
3796 {
3797 command_t *xscale_cmd;
3798
3799 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3800
3801 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
3802 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3803
3804 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3805 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3806 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3807 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3808
3809 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_idcache_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
3810
3811 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable|disable> ['fill' [n]|'wrap']");
3812
3813 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3814 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3815 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3816 COMMAND_EXEC, "load image from <file> [base address]");
3817
3818 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3819 register_command(cmd_ctx, xscale_cmd, "fast_memory_access", handle_xscale_fast_memory_access_command,
3820 COMMAND_ANY, "use fast memory accesses instead of slower but potentially unsafe slow accesses <enable|disable>");
3821
3822
3823 armv4_5_register_commands(cmd_ctx);
3824
3825 return ERROR_OK;
3826 }
