1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
19 ***************************************************************************/
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "replacements.h"
25
26 #include "xscale.h"
27
28 #include "register.h"
29 #include "target.h"
30 #include "armv4_5.h"
31 #include "arm_simulator.h"
32 #include "arm_disassembler.h"
33 #include "log.h"
34 #include "jtag.h"
35 #include "binarybuffer.h"
36 #include "time_support.h"
37 #include "breakpoints.h"
38 #include "fileio.h"
39
40 #include <stdlib.h>
41 #include <string.h>
42
43 #include <sys/types.h>
44 #include <unistd.h>
45 #include <errno.h>
46
47
48 /* cli handling */
49 int xscale_register_commands(struct command_context_s *cmd_ctx);
50
51 /* forward declarations */
52 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target);
53 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target);
54 int xscale_quit();
55
56 int xscale_arch_state(struct target_s *target);
57 int xscale_poll(target_t *target);
58 int xscale_halt(target_t *target);
59 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution);
60 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints);
61 int xscale_debug_entry(target_t *target);
62 int xscale_restore_context(target_t *target);
63
64 int xscale_assert_reset(target_t *target);
65 int xscale_deassert_reset(target_t *target);
66 int xscale_soft_reset_halt(struct target_s *target);
67
68 int xscale_set_reg_u32(reg_t *reg, u32 value);
69
70 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode);
71 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value);
72
73 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
74 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
75 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer);
76 int xscale_checksum_memory(struct target_s *target, u32 address, u32 count, u32* checksum);
77
78 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
79 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
80 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
81 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
82 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
83 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
84 void xscale_enable_watchpoints(struct target_s *target);
85 void xscale_enable_breakpoints(struct target_s *target);
86 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical);
87 static int xscale_mmu(struct target_s *target, int *enabled);
88
89 int xscale_read_trace(target_t *target);
90
91 target_type_t xscale_target =
92 {
93 .name = "xscale",
94
95 .poll = xscale_poll,
96 .arch_state = xscale_arch_state,
97
98 .target_request_data = NULL,
99
100 .halt = xscale_halt,
101 .resume = xscale_resume,
102 .step = xscale_step,
103
104 .assert_reset = xscale_assert_reset,
105 .deassert_reset = xscale_deassert_reset,
106 .soft_reset_halt = xscale_soft_reset_halt,
107
108 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
109
110 .read_memory = xscale_read_memory,
111 .write_memory = xscale_write_memory,
112 .bulk_write_memory = xscale_bulk_write_memory,
113 .checksum_memory = xscale_checksum_memory,
114
115 .run_algorithm = armv4_5_run_algorithm,
116
117 .add_breakpoint = xscale_add_breakpoint,
118 .remove_breakpoint = xscale_remove_breakpoint,
119 .add_watchpoint = xscale_add_watchpoint,
120 .remove_watchpoint = xscale_remove_watchpoint,
121
122 .register_commands = xscale_register_commands,
123 .target_command = xscale_target_command,
124 .init_target = xscale_init_target,
125 .quit = xscale_quit,
126
127 .virt2phys = xscale_virt2phys,
128 .mmu = xscale_mmu
129 };
130
131 char* xscale_reg_list[] =
132 {
133 "XSCALE_MAINID", /* 0 */
134 "XSCALE_CACHETYPE",
135 "XSCALE_CTRL",
136 "XSCALE_AUXCTRL",
137 "XSCALE_TTB",
138 "XSCALE_DAC",
139 "XSCALE_FSR",
140 "XSCALE_FAR",
141 "XSCALE_PID",
142 "XSCALE_CPACCESS",
143 "XSCALE_IBCR0", /* 10 */
144 "XSCALE_IBCR1",
145 "XSCALE_DBR0",
146 "XSCALE_DBR1",
147 "XSCALE_DBCON",
148 "XSCALE_TBREG",
149 "XSCALE_CHKPT0",
150 "XSCALE_CHKPT1",
151 "XSCALE_DCSR",
152 "XSCALE_TX",
153 "XSCALE_RX", /* 20 */
154 "XSCALE_TXRXCTRL",
155 };
156
157 xscale_reg_t xscale_reg_arch_info[] =
158 {
159 {XSCALE_MAINID, NULL},
160 {XSCALE_CACHETYPE, NULL},
161 {XSCALE_CTRL, NULL},
162 {XSCALE_AUXCTRL, NULL},
163 {XSCALE_TTB, NULL},
164 {XSCALE_DAC, NULL},
165 {XSCALE_FSR, NULL},
166 {XSCALE_FAR, NULL},
167 {XSCALE_PID, NULL},
168 {XSCALE_CPACCESS, NULL},
169 {XSCALE_IBCR0, NULL},
170 {XSCALE_IBCR1, NULL},
171 {XSCALE_DBR0, NULL},
172 {XSCALE_DBR1, NULL},
173 {XSCALE_DBCON, NULL},
174 {XSCALE_TBREG, NULL},
175 {XSCALE_CHKPT0, NULL},
176 {XSCALE_CHKPT1, NULL},
177 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
178 {-1, NULL}, /* TX accessed via JTAG */
179 {-1, NULL}, /* RX accessed via JTAG */
180 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
181 };
182
183 int xscale_reg_arch_type = -1;
184
185 int xscale_get_reg(reg_t *reg);
186 int xscale_set_reg(reg_t *reg, u8 *buf);
187
188 int xscale_get_arch_pointers(target_t *target, armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
189 {
190 armv4_5_common_t *armv4_5 = target->arch_info;
191 xscale_common_t *xscale = armv4_5->arch_info;
192
193 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
194 {
195 LOG_ERROR("target isn't an XScale target");
196 return -1;
197 }
198
199 if (xscale->common_magic != XSCALE_COMMON_MAGIC)
200 {
201 LOG_ERROR("target isn't an XScale target");
202 return -1;
203 }
204
205 *armv4_5_p = armv4_5;
206 *xscale_p = xscale;
207
208 return ERROR_OK;
209 }
210
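/* Queue an IR scan selecting new_instr on the XScale TAP, skipping the scan
 * if that instruction is already loaded. The scan is only queued here; it is
 * executed together with the DR scans that follow.
 */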
211 int xscale_jtag_set_instr(int chain_pos, u32 new_instr)
212 {
213 jtag_device_t *device = jtag_get_device(chain_pos);
214
215 if (buf_get_u32(device->cur_instr, 0, device->ir_length) != new_instr)
216 {
217 scan_field_t field;
218
219 field.device = chain_pos;
220 field.num_bits = device->ir_length;
221 field.out_value = calloc(CEIL(field.num_bits, 8), 1);
222 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
223 field.out_mask = NULL;
224 field.in_value = NULL;
225 jtag_set_check_value(&field, device->expected, device->expected_mask, NULL);
226
227 jtag_add_ir_scan(1, &field, -1);
228
229 free(field.out_value);
230 }
231
232 return ERROR_OK;
233 }
234
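/* Read the DCSR through its JTAG data register into the register cache and
 * immediately write the captured value back, driving the cached hold_rst and
 * external_debug_break flags in the 3-bit handshake field.
 */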
235 int xscale_read_dcsr(target_t *target)
236 {
237 armv4_5_common_t *armv4_5 = target->arch_info;
238 xscale_common_t *xscale = armv4_5->arch_info;
239
240 int retval;
241
242 scan_field_t fields[3];
243 u8 field0 = 0x0;
244 u8 field0_check_value = 0x2;
245 u8 field0_check_mask = 0x7;
246 u8 field2 = 0x0;
247 u8 field2_check_value = 0x0;
248 u8 field2_check_mask = 0x1;
249
250 jtag_add_end_state(TAP_PD);
251 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
252
253 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
254 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
255
256 fields[0].device = xscale->jtag_info.chain_pos;
257 fields[0].num_bits = 3;
258 fields[0].out_value = &field0;
259 fields[0].out_mask = NULL;
260 fields[0].in_value = NULL;
261 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
262
263 fields[1].device = xscale->jtag_info.chain_pos;
264 fields[1].num_bits = 32;
265 fields[1].out_value = NULL;
266 fields[1].out_mask = NULL;
267 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
268 fields[1].in_handler = NULL;
269 fields[1].in_handler_priv = NULL;
270 fields[1].in_check_value = NULL;
271 fields[1].in_check_mask = NULL;
272
273 fields[2].device = xscale->jtag_info.chain_pos;
274 fields[2].num_bits = 1;
275 fields[2].out_value = &field2;
276 fields[2].out_mask = NULL;
277 fields[2].in_value = NULL;
278 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
279
280 jtag_add_dr_scan(3, fields, -1);
281
282 if ((retval = jtag_execute_queue()) != ERROR_OK)
283 {
284 LOG_ERROR("JTAG error while reading DCSR");
285 return retval;
286 }
287
288 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
289 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
290
 291 /* write the register with the value we just read;
 292 * on this second pass, only the first bit of field0 is guaranteed to be 0
 293 */
294 field0_check_mask = 0x1;
295 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
296 fields[1].in_value = NULL;
297
298 jtag_add_end_state(TAP_RTI);
299
300 jtag_add_dr_scan(3, fields, -1);
301
 302 /* DANGER!!! this must be here. It makes sure that the arguments
 303 * to jtag_set_check_value() do not go out of scope! */
304 return jtag_execute_queue();
305 }
306
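/* Collect num_words 32-bit words sent by the debug handler through DBGTX.
 * Words whose ready bit was not set are re-scheduled; after 1000 rounds
 * without progress the read is aborted with a timeout error.
 */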
307 int xscale_receive(target_t *target, u32 *buffer, int num_words)
308 {
309 if (num_words==0)
310 return ERROR_INVALID_ARGUMENTS;
311
312 int retval=ERROR_OK;
313 armv4_5_common_t *armv4_5 = target->arch_info;
314 xscale_common_t *xscale = armv4_5->arch_info;
315
316 enum tap_state path[3];
317 scan_field_t fields[3];
318
319 u8 *field0 = malloc(num_words * 1);
320 u8 field0_check_value = 0x2;
321 u8 field0_check_mask = 0x6;
322 u32 *field1 = malloc(num_words * 4);
323 u8 field2_check_value = 0x0;
324 u8 field2_check_mask = 0x1;
325 int words_done = 0;
326 int words_scheduled = 0;
327
328 int i;
329
330 path[0] = TAP_SDS;
331 path[1] = TAP_CD;
332 path[2] = TAP_SD;
333
334 fields[0].device = xscale->jtag_info.chain_pos;
335 fields[0].num_bits = 3;
336 fields[0].out_value = NULL;
337 fields[0].out_mask = NULL;
338 fields[0].in_value = NULL;
339 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
340
341 fields[1].device = xscale->jtag_info.chain_pos;
342 fields[1].num_bits = 32;
343 fields[1].out_value = NULL;
344 fields[1].out_mask = NULL;
345 fields[1].in_value = NULL;
346 fields[1].in_handler = NULL;
347 fields[1].in_handler_priv = NULL;
348 fields[1].in_check_value = NULL;
349 fields[1].in_check_mask = NULL;
350
351
352
353 fields[2].device = xscale->jtag_info.chain_pos;
354 fields[2].num_bits = 1;
355 fields[2].out_value = NULL;
356 fields[2].out_mask = NULL;
357 fields[2].in_value = NULL;
358 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
359
360 jtag_add_end_state(TAP_RTI);
361 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
362 jtag_add_runtest(1, -1); /* ensures that we're in the TAP_RTI state as the above could be a no-op */
363
364 /* repeat until all words have been collected */
365 int attempts=0;
366 while (words_done < num_words)
367 {
368 /* schedule reads */
369 words_scheduled = 0;
370 for (i = words_done; i < num_words; i++)
371 {
372 fields[0].in_value = &field0[i];
373 fields[1].in_handler = buf_to_u32_handler;
374 fields[1].in_handler_priv = (u8*)&field1[i];
375
376 jtag_add_pathmove(3, path);
377 jtag_add_dr_scan(3, fields, TAP_RTI);
378 words_scheduled++;
379 }
380
381 if ((retval = jtag_execute_queue()) != ERROR_OK)
382 {
383 LOG_ERROR("JTAG error while receiving data from debug handler");
384 break;
385 }
386
387 /* examine results */
388 for (i = words_done; i < num_words; i++)
389 {
 390 if (!(field0[i] & 1))
391 {
392 /* move backwards if necessary */
393 int j;
394 for (j = i; j < num_words - 1; j++)
395 {
396 field0[j] = field0[j+1];
397 field1[j] = field1[j+1];
398 }
399 words_scheduled--;
400 }
401 }
402 if (words_scheduled==0)
403 {
404 if (attempts++==1000)
405 {
406 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
407 retval=ERROR_TARGET_TIMEOUT;
408 break;
409 }
410 }
411
412 words_done += words_scheduled;
413 }
414
415 for (i = 0; i < num_words; i++)
416 *(buffer++) = buf_get_u32((u8*)&field1[i], 0, 32);
417
 418 free(field0);
 free(field1);
419
420 return retval;
421 }
422
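/* Read the TX register once. With consume set, the scan goes straight from
 * Capture-DR to Shift-DR and clears TX_READY; otherwise a detour through
 * Exit1-DR/Pause-DR leaves the content in place. Returns
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE if no data was ready, and
 * ERROR_TARGET_TIMEOUT if a consuming read sees no data for about a second.
 */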
423 int xscale_read_tx(target_t *target, int consume)
424 {
425 armv4_5_common_t *armv4_5 = target->arch_info;
426 xscale_common_t *xscale = armv4_5->arch_info;
427 enum tap_state path[3];
428 enum tap_state noconsume_path[6];
429
430 int retval;
431 struct timeval timeout, now;
432
433 scan_field_t fields[3];
434 u8 field0_in = 0x0;
435 u8 field0_check_value = 0x2;
436 u8 field0_check_mask = 0x6;
437 u8 field2_check_value = 0x0;
438 u8 field2_check_mask = 0x1;
439
440 jtag_add_end_state(TAP_RTI);
441
442 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
443
444 path[0] = TAP_SDS;
445 path[1] = TAP_CD;
446 path[2] = TAP_SD;
447
448 noconsume_path[0] = TAP_SDS;
449 noconsume_path[1] = TAP_CD;
450 noconsume_path[2] = TAP_E1D;
451 noconsume_path[3] = TAP_PD;
452 noconsume_path[4] = TAP_E2D;
453 noconsume_path[5] = TAP_SD;
454
455 fields[0].device = xscale->jtag_info.chain_pos;
456 fields[0].num_bits = 3;
457 fields[0].out_value = NULL;
458 fields[0].out_mask = NULL;
459 fields[0].in_value = &field0_in;
460 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
461
462 fields[1].device = xscale->jtag_info.chain_pos;
463 fields[1].num_bits = 32;
464 fields[1].out_value = NULL;
465 fields[1].out_mask = NULL;
466 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
467 fields[1].in_handler = NULL;
468 fields[1].in_handler_priv = NULL;
469 fields[1].in_check_value = NULL;
470 fields[1].in_check_mask = NULL;
471
472
473
474 fields[2].device = xscale->jtag_info.chain_pos;
475 fields[2].num_bits = 1;
476 fields[2].out_value = NULL;
477 fields[2].out_mask = NULL;
478 fields[2].in_value = NULL;
479 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
480
481 gettimeofday(&timeout, NULL);
482 timeval_add_time(&timeout, 1, 0);
483
484 for (;;)
485 {
486 int i;
487 for (i=0; i<100; i++)
488 {
489 /* if we want to consume the register content (i.e. clear TX_READY),
490 * we have to go straight from Capture-DR to Shift-DR
491 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
492 */
493 if (consume)
494 jtag_add_pathmove(3, path);
495 else
496 {
497 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
498 }
499
500 jtag_add_dr_scan(3, fields, TAP_RTI);
501
502 if ((retval = jtag_execute_queue()) != ERROR_OK)
503 {
504 LOG_ERROR("JTAG error while reading TX");
505 return ERROR_TARGET_TIMEOUT;
506 }
507
508 gettimeofday(&now, NULL);
509 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
510 {
511 LOG_ERROR("time out reading TX register");
512 return ERROR_TARGET_TIMEOUT;
513 }
514 if (!((!(field0_in & 1)) && consume))
515 {
516 goto done;
517 }
518 }
519 LOG_DEBUG("waiting 10ms");
520 usleep(10*1000); /* avoid flooding the logs */
521 }
522 done:
523
524 if (!(field0_in & 1))
525 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
526
527 return ERROR_OK;
528 }
529
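/* Hand the cached RX register value to the debug handler: poll the handshake
 * bit until the previous word has been picked up (about one second timeout),
 * then perform a final scan with rx_valid set.
 */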
530 int xscale_write_rx(target_t *target)
531 {
532 armv4_5_common_t *armv4_5 = target->arch_info;
533 xscale_common_t *xscale = armv4_5->arch_info;
534
535 int retval;
536 struct timeval timeout, now;
537
538 scan_field_t fields[3];
539 u8 field0_out = 0x0;
540 u8 field0_in = 0x0;
541 u8 field0_check_value = 0x2;
542 u8 field0_check_mask = 0x6;
543 u8 field2 = 0x0;
544 u8 field2_check_value = 0x0;
545 u8 field2_check_mask = 0x1;
546
547 jtag_add_end_state(TAP_RTI);
548
549 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
550
551 fields[0].device = xscale->jtag_info.chain_pos;
552 fields[0].num_bits = 3;
553 fields[0].out_value = &field0_out;
554 fields[0].out_mask = NULL;
555 fields[0].in_value = &field0_in;
556 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
557
558 fields[1].device = xscale->jtag_info.chain_pos;
559 fields[1].num_bits = 32;
560 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
561 fields[1].out_mask = NULL;
562 fields[1].in_value = NULL;
563 fields[1].in_handler = NULL;
564 fields[1].in_handler_priv = NULL;
565 fields[1].in_check_value = NULL;
566 fields[1].in_check_mask = NULL;
567
568
569
570 fields[2].device = xscale->jtag_info.chain_pos;
571 fields[2].num_bits = 1;
572 fields[2].out_value = &field2;
573 fields[2].out_mask = NULL;
574 fields[2].in_value = NULL;
575 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
576
577 gettimeofday(&timeout, NULL);
578 timeval_add_time(&timeout, 1, 0);
579
580 /* poll until rx_read is low */
581 LOG_DEBUG("polling RX");
582 for (;;)
583 {
584 int i;
585 for (i=0; i<10; i++)
586 {
587 jtag_add_dr_scan(3, fields, TAP_RTI);
588
589 if ((retval = jtag_execute_queue()) != ERROR_OK)
590 {
591 LOG_ERROR("JTAG error while writing RX");
592 return retval;
593 }
594
595 gettimeofday(&now, NULL);
596 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
597 {
598 LOG_ERROR("time out writing RX register");
599 return ERROR_TARGET_TIMEOUT;
600 }
601 if (!(field0_in & 1))
602 goto done;
603 }
604 LOG_DEBUG("waiting 10ms");
605 usleep(10*1000); /* wait 10ms to avoid flooding the logs */
606 }
607 done:
608
609 /* set rx_valid */
610 field2 = 0x1;
611 jtag_add_dr_scan(3, fields, TAP_RTI);
612
613 if ((retval = jtag_execute_queue()) != ERROR_OK)
614 {
615 LOG_ERROR("JTAG error while writing RX");
616 return retval;
617 }
618
619 return ERROR_OK;
620 }
621
 622 /* send count elements of size bytes each to the debug handler */
623 int xscale_send(target_t *target, u8 *buffer, int count, int size)
624 {
625 armv4_5_common_t *armv4_5 = target->arch_info;
626 xscale_common_t *xscale = armv4_5->arch_info;
627
628 int retval;
629
630 int done_count = 0;
631 u8 output[4] = {0, 0, 0, 0};
632
633 scan_field_t fields[3];
634 u8 field0_out = 0x0;
635 u8 field0_check_value = 0x2;
636 u8 field0_check_mask = 0x6;
637 u8 field2 = 0x1;
638 u8 field2_check_value = 0x0;
639 u8 field2_check_mask = 0x1;
640
641 jtag_add_end_state(TAP_RTI);
642
643 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
644
645 fields[0].device = xscale->jtag_info.chain_pos;
646 fields[0].num_bits = 3;
647 fields[0].out_value = &field0_out;
648 fields[0].out_mask = NULL;
649 fields[0].in_handler = NULL;
650 fields[0].in_value = NULL;
651 if (!xscale->fast_memory_access)
652 {
653 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
654 }
655
656 fields[1].device = xscale->jtag_info.chain_pos;
657 fields[1].num_bits = 32;
658 fields[1].out_value = output;
659 fields[1].out_mask = NULL;
660 fields[1].in_value = NULL;
661 fields[1].in_handler = NULL;
662 fields[1].in_handler_priv = NULL;
663 fields[1].in_check_value = NULL;
664 fields[1].in_check_mask = NULL;
665
666
667
668 fields[2].device = xscale->jtag_info.chain_pos;
669 fields[2].num_bits = 1;
670 fields[2].out_value = &field2;
671 fields[2].out_mask = NULL;
672 fields[2].in_value = NULL;
673 fields[2].in_handler = NULL;
674 if (!xscale->fast_memory_access)
675 {
676 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
677 }
678
679 if (size==4)
680 {
681 int endianness = target->endianness;
682 while (done_count++ < count)
683 {
684 if (endianness == TARGET_LITTLE_ENDIAN)
685 {
686 output[0]=buffer[0];
687 output[1]=buffer[1];
688 output[2]=buffer[2];
689 output[3]=buffer[3];
690 } else
691 {
692 output[0]=buffer[3];
693 output[1]=buffer[2];
694 output[2]=buffer[1];
695 output[3]=buffer[0];
696 }
697 jtag_add_dr_scan(3, fields, TAP_RTI);
698 buffer += size;
699 }
700
701 } else
702 {
703 while (done_count++ < count)
704 {
705 /* extract sized element from target-endian buffer, and put it
706 * into little-endian output buffer
707 */
708 switch (size)
709 {
710 case 2:
711 buf_set_u32(output, 0, 32, target_buffer_get_u16(target, buffer));
712 break;
713 case 1:
714 output[0] = *buffer;
715 break;
716 default:
717 LOG_ERROR("BUG: size neither 4, 2 nor 1");
718 exit(-1);
719 }
720
721 jtag_add_dr_scan(3, fields, TAP_RTI);
722 buffer += size;
723 }
724
725 }
726
727 if ((retval = jtag_execute_queue()) != ERROR_OK)
728 {
729 LOG_ERROR("JTAG error while sending data to debug handler");
730 return retval;
731 }
732
733 return ERROR_OK;
734 }
735
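/* Convenience wrapper: place value in the cached RX register and pass it to
 * the debug handler via xscale_write_rx().
 */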
736 int xscale_send_u32(target_t *target, u32 value)
737 {
738 armv4_5_common_t *armv4_5 = target->arch_info;
739 xscale_common_t *xscale = armv4_5->arch_info;
740
741 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
742 return xscale_write_rx(target);
743 }
744
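/* Write the cached DCSR value to the debug unit. hold_rst and ext_dbg_brk
 * update the cached flags; passing -1 keeps the respective current setting.
 */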
745 int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
746 {
747 armv4_5_common_t *armv4_5 = target->arch_info;
748 xscale_common_t *xscale = armv4_5->arch_info;
749
750 int retval;
751
752 scan_field_t fields[3];
753 u8 field0 = 0x0;
754 u8 field0_check_value = 0x2;
755 u8 field0_check_mask = 0x7;
756 u8 field2 = 0x0;
757 u8 field2_check_value = 0x0;
758 u8 field2_check_mask = 0x1;
759
760 if (hold_rst != -1)
761 xscale->hold_rst = hold_rst;
762
763 if (ext_dbg_brk != -1)
764 xscale->external_debug_break = ext_dbg_brk;
765
766 jtag_add_end_state(TAP_RTI);
767 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
768
769 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
770 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
771
772 fields[0].device = xscale->jtag_info.chain_pos;
773 fields[0].num_bits = 3;
774 fields[0].out_value = &field0;
775 fields[0].out_mask = NULL;
776 fields[0].in_value = NULL;
777 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
778
779 fields[1].device = xscale->jtag_info.chain_pos;
780 fields[1].num_bits = 32;
781 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
782 fields[1].out_mask = NULL;
783 fields[1].in_value = NULL;
784 fields[1].in_handler = NULL;
785 fields[1].in_handler_priv = NULL;
786 fields[1].in_check_value = NULL;
787 fields[1].in_check_mask = NULL;
788
789
790
791 fields[2].device = xscale->jtag_info.chain_pos;
792 fields[2].num_bits = 1;
793 fields[2].out_value = &field2;
794 fields[2].out_mask = NULL;
795 fields[2].in_value = NULL;
796 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
797
798 jtag_add_dr_scan(3, fields, -1);
799
800 if ((retval = jtag_execute_queue()) != ERROR_OK)
801 {
802 LOG_ERROR("JTAG error while writing DCSR");
803 return retval;
804 }
805
806 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
807 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
808
809 return ERROR_OK;
810 }
811
 812 /* parity of the number of set bits: 0 if even, 1 if odd (for 32-bit words) */
813 unsigned int parity (unsigned int v)
814 {
815 unsigned int ov = v;
816 v ^= v >> 16;
817 v ^= v >> 8;
818 v ^= v >> 4;
819 v &= 0xf;
820 LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
821 return (0x6996 >> v) & 1;
822 }
823
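/* Load one 8-word line into the instruction cache through the LDIC JTAG
 * function; mini selects the mini i-cache (CMD 0b011) instead of the main
 * i-cache (CMD 0b010). Every data word is shifted together with its parity bit.
 */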
824 int xscale_load_ic(target_t *target, int mini, u32 va, u32 buffer[8])
825 {
826 armv4_5_common_t *armv4_5 = target->arch_info;
827 xscale_common_t *xscale = armv4_5->arch_info;
828 u8 packet[4];
829 u8 cmd;
830 int word;
831
832 scan_field_t fields[2];
833
834 LOG_DEBUG("loading miniIC at 0x%8.8x", va);
835
836 jtag_add_end_state(TAP_RTI);
837 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
838
839 /* CMD is b010 for Main IC and b011 for Mini IC */
840 if (mini)
841 buf_set_u32(&cmd, 0, 3, 0x3);
842 else
843 buf_set_u32(&cmd, 0, 3, 0x2);
844
845 buf_set_u32(&cmd, 3, 3, 0x0);
846
847 /* virtual address of desired cache line */
848 buf_set_u32(packet, 0, 27, va >> 5);
849
850 fields[0].device = xscale->jtag_info.chain_pos;
851 fields[0].num_bits = 6;
852 fields[0].out_value = &cmd;
853 fields[0].out_mask = NULL;
854 fields[0].in_value = NULL;
855 fields[0].in_check_value = NULL;
856 fields[0].in_check_mask = NULL;
857 fields[0].in_handler = NULL;
858 fields[0].in_handler_priv = NULL;
859
860 fields[1].device = xscale->jtag_info.chain_pos;
861 fields[1].num_bits = 27;
862 fields[1].out_value = packet;
863 fields[1].out_mask = NULL;
864 fields[1].in_value = NULL;
865 fields[1].in_check_value = NULL;
866 fields[1].in_check_mask = NULL;
867 fields[1].in_handler = NULL;
868 fields[1].in_handler_priv = NULL;
869
870 jtag_add_dr_scan(2, fields, -1);
871
872 fields[0].num_bits = 32;
873 fields[0].out_value = packet;
874
875 fields[1].num_bits = 1;
876 fields[1].out_value = &cmd;
877
878 for (word = 0; word < 8; word++)
879 {
880 buf_set_u32(packet, 0, 32, buffer[word]);
881 cmd = parity(*((u32*)packet));
882 jtag_add_dr_scan(2, fields, -1);
883 }
884
885 jtag_execute_queue();
886
887 return ERROR_OK;
888 }
889
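/* Invalidate the i-cache line containing virtual address va, using the LDIC
 * "invalidate IC line" command (0b000).
 */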
890 int xscale_invalidate_ic_line(target_t *target, u32 va)
891 {
892 armv4_5_common_t *armv4_5 = target->arch_info;
893 xscale_common_t *xscale = armv4_5->arch_info;
894 u8 packet[4];
895 u8 cmd;
896
897 scan_field_t fields[2];
898
899 jtag_add_end_state(TAP_RTI);
900 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
901
902 /* CMD for invalidate IC line b000, bits [6:4] b000 */
903 buf_set_u32(&cmd, 0, 6, 0x0);
904
905 /* virtual address of desired cache line */
906 buf_set_u32(packet, 0, 27, va >> 5);
907
908 fields[0].device = xscale->jtag_info.chain_pos;
909 fields[0].num_bits = 6;
910 fields[0].out_value = &cmd;
911 fields[0].out_mask = NULL;
912 fields[0].in_value = NULL;
913 fields[0].in_check_value = NULL;
914 fields[0].in_check_mask = NULL;
915 fields[0].in_handler = NULL;
916 fields[0].in_handler_priv = NULL;
917
918 fields[1].device = xscale->jtag_info.chain_pos;
919 fields[1].num_bits = 27;
920 fields[1].out_value = packet;
921 fields[1].out_mask = NULL;
922 fields[1].in_value = NULL;
923 fields[1].in_check_value = NULL;
924 fields[1].in_check_mask = NULL;
925 fields[1].in_handler = NULL;
926 fields[1].in_handler_priv = NULL;
927
928 jtag_add_dr_scan(2, fields, -1);
929
930 return ERROR_OK;
931 }
932
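/* Rebuild the low (0x0) and high (0xffff0000) exception vector tables: use
 * static vectors where the user configured them, read the rest from target
 * memory, patch both reset vectors with a branch into the debug handler, and
 * reload the tables into the mini i-cache.
 */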
933 int xscale_update_vectors(target_t *target)
934 {
935 armv4_5_common_t *armv4_5 = target->arch_info;
936 xscale_common_t *xscale = armv4_5->arch_info;
937 int i;
938 int retval;
939
940 u32 low_reset_branch, high_reset_branch;
941
942 for (i = 1; i < 8; i++)
943 {
944 /* if there's a static vector specified for this exception, override */
945 if (xscale->static_high_vectors_set & (1 << i))
946 {
947 xscale->high_vectors[i] = xscale->static_high_vectors[i];
948 }
949 else
950 {
951 retval=target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
952 if (retval == ERROR_TARGET_TIMEOUT)
953 return retval;
954 if (retval!=ERROR_OK)
955 {
956 /* Some of these reads will fail as part of normal execution */
957 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
958 }
959 }
960 }
961
962 for (i = 1; i < 8; i++)
963 {
964 if (xscale->static_low_vectors_set & (1 << i))
965 {
966 xscale->low_vectors[i] = xscale->static_low_vectors[i];
967 }
968 else
969 {
970 retval=target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
971 if (retval == ERROR_TARGET_TIMEOUT)
972 return retval;
973 if (retval!=ERROR_OK)
974 {
975 /* Some of these reads will fail as part of normal execution */
976 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
977 }
978 }
979 }
980
981 /* calculate branches to debug handler */
982 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
983 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
984
985 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
986 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
987
988 /* invalidate and load exception vectors in mini i-cache */
989 xscale_invalidate_ic_line(target, 0x0);
990 xscale_invalidate_ic_line(target, 0xffff0000);
991
992 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
993 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
994
995 return ERROR_OK;
996 }
997
998 int xscale_arch_state(struct target_s *target)
999 {
1000 armv4_5_common_t *armv4_5 = target->arch_info;
1001 xscale_common_t *xscale = armv4_5->arch_info;
1002
1003 char *state[] =
1004 {
1005 "disabled", "enabled"
1006 };
1007
1008 char *arch_dbg_reason[] =
1009 {
1010 "", "\n(processor reset)", "\n(trace buffer full)"
1011 };
1012
1013 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
1014 {
1015 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
1016 exit(-1);
1017 }
1018
1019 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
1020 "cpsr: 0x%8.8x pc: 0x%8.8x\n"
1021 "MMU: %s, D-Cache: %s, I-Cache: %s"
1022 "%s",
1023 armv4_5_state_strings[armv4_5->core_state],
1024 target_debug_reason_strings[target->debug_reason],
1025 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
1026 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
1027 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
1028 state[xscale->armv4_5_mmu.mmu_enabled],
1029 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
1030 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
1031 arch_dbg_reason[xscale->arch_debug_reason]);
1032
1033 return ERROR_OK;
1034 }
1035
1036 int xscale_poll(target_t *target)
1037 {
1038 int retval=ERROR_OK;
1039 armv4_5_common_t *armv4_5 = target->arch_info;
1040 xscale_common_t *xscale = armv4_5->arch_info;
1041
1042 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
1043 {
1044 enum target_state previous_state = target->state;
1045 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
1046 {
1047
1048 /* there's data to read from the tx register, we entered debug state */
1049 xscale->handler_running = 1;
1050
1051 target->state = TARGET_HALTED;
1052
1053 /* process debug entry, fetching current mode regs */
1054 retval = xscale_debug_entry(target);
1055 }
1056 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1057 {
1058 LOG_USER("error while polling TX register, reset CPU");
 1059 /* here we "lie" so GDB won't get stuck and a reset can be performed */
1060 target->state = TARGET_HALTED;
1061 }
1062
1063 /* debug_entry could have overwritten target state (i.e. immediate resume)
1064 * don't signal event handlers in that case
1065 */
1066 if (target->state != TARGET_HALTED)
1067 return ERROR_OK;
1068
1069 /* if target was running, signal that we halted
1070 * otherwise we reentered from debug execution */
1071 if (previous_state == TARGET_RUNNING)
1072 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1073 else
1074 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
1075 }
1076
1077 return retval;
1078 }
1079
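/* Process entry into debug state: fetch r0, pc, r1-r7 and the cpsr from the
 * debug handler, then the banked registers of the current mode, decode the
 * DCSR method-of-entry bits into a debug reason, apply the PC fixup and, if
 * tracing is active, read out the trace buffer.
 */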
1080 int xscale_debug_entry(target_t *target)
1081 {
1082 armv4_5_common_t *armv4_5 = target->arch_info;
1083 xscale_common_t *xscale = armv4_5->arch_info;
1084 u32 pc;
1085 u32 buffer[10];
1086 int i;
1087 int retval;
1088
1089 u32 moe;
1090
1091 /* clear external dbg break (will be written on next DCSR read) */
1092 xscale->external_debug_break = 0;
1093 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
1094 return retval;
1095
1096 /* get r0, pc, r1 to r7 and cpsr */
1097 if ((retval=xscale_receive(target, buffer, 10))!=ERROR_OK)
1098 return retval;
1099
1100 /* move r0 from buffer to register cache */
1101 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
 1102 armv4_5->core_cache->reg_list[0].dirty = 1;
 1103 armv4_5->core_cache->reg_list[0].valid = 1;
1104 LOG_DEBUG("r0: 0x%8.8x", buffer[0]);
1105
1106 /* move pc from buffer to register cache */
1107 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
1108 armv4_5->core_cache->reg_list[15].dirty = 1;
1109 armv4_5->core_cache->reg_list[15].valid = 1;
1110 LOG_DEBUG("pc: 0x%8.8x", buffer[1]);
1111
1112 /* move data from buffer to register cache */
1113 for (i = 1; i <= 7; i++)
1114 {
1115 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
1116 armv4_5->core_cache->reg_list[i].dirty = 1;
1117 armv4_5->core_cache->reg_list[i].valid = 1;
1118 LOG_DEBUG("r%i: 0x%8.8x", i, buffer[i + 1]);
1119 }
1120
1121 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
1122 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
1123 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
1124 LOG_DEBUG("cpsr: 0x%8.8x", buffer[9]);
1125
1126 armv4_5->core_mode = buffer[9] & 0x1f;
1127 if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
1128 {
1129 target->state = TARGET_UNKNOWN;
1130 LOG_ERROR("cpsr contains invalid mode value - communication failure");
1131 return ERROR_TARGET_FAILURE;
1132 }
1133 LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
1134
1135 if (buffer[9] & 0x20)
1136 armv4_5->core_state = ARMV4_5_STATE_THUMB;
1137 else
1138 armv4_5->core_state = ARMV4_5_STATE_ARM;
1139
1140 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1141 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
1142 {
1143 xscale_receive(target, buffer, 8);
1144 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1145 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
1146 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
1147 }
1148 else
1149 {
1150 /* r8 to r14, but no spsr */
1151 xscale_receive(target, buffer, 7);
1152 }
1153
1154 /* move data from buffer to register cache */
1155 for (i = 8; i <= 14; i++)
1156 {
1157 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
1158 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
1159 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
1160 }
1161
1162 /* examine debug reason */
1163 xscale_read_dcsr(target);
1164 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
1165
1166 /* stored PC (for calculating fixup) */
1167 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1168
1169 switch (moe)
1170 {
1171 case 0x0: /* Processor reset */
1172 target->debug_reason = DBG_REASON_DBGRQ;
1173 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1174 pc -= 4;
1175 break;
1176 case 0x1: /* Instruction breakpoint hit */
1177 target->debug_reason = DBG_REASON_BREAKPOINT;
1178 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1179 pc -= 4;
1180 break;
1181 case 0x2: /* Data breakpoint hit */
1182 target->debug_reason = DBG_REASON_WATCHPOINT;
1183 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1184 pc -= 4;
1185 break;
1186 case 0x3: /* BKPT instruction executed */
1187 target->debug_reason = DBG_REASON_BREAKPOINT;
1188 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1189 pc -= 4;
1190 break;
1191 case 0x4: /* Ext. debug event */
1192 target->debug_reason = DBG_REASON_DBGRQ;
1193 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1194 pc -= 4;
1195 break;
 1196 case 0x5: /* Vector trap occurred */
1197 target->debug_reason = DBG_REASON_BREAKPOINT;
1198 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1199 pc -= 4;
1200 break;
1201 case 0x6: /* Trace buffer full break */
1202 target->debug_reason = DBG_REASON_DBGRQ;
1203 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1204 pc -= 4;
1205 break;
1206 case 0x7: /* Reserved */
1207 default:
1208 LOG_ERROR("Method of Entry is 'Reserved'");
1209 exit(-1);
1210 break;
1211 }
1212
1213 /* apply PC fixup */
1214 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1215
1216 /* on the first debug entry, identify cache type */
1217 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1218 {
1219 u32 cache_type_reg;
1220
1221 /* read cp15 cache type register */
1222 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1223 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1224
1225 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1226 }
1227
1228 /* examine MMU and Cache settings */
1229 /* read cp15 control register */
1230 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1231 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1232 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1233 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1234 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1235
1236 /* tracing enabled, read collected trace data */
1237 if (xscale->trace.buffer_enabled)
1238 {
1239 xscale_read_trace(target);
1240 xscale->trace.buffer_fill--;
1241
1242 /* resume if we're still collecting trace data */
1243 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1244 && (xscale->trace.buffer_fill > 0))
1245 {
1246 xscale_resume(target, 1, 0x0, 1, 0);
1247 }
1248 else
1249 {
1250 xscale->trace.buffer_enabled = 0;
1251 }
1252 }
1253
1254 return ERROR_OK;
1255 }
1256
1257 int xscale_halt(target_t *target)
1258 {
1259 armv4_5_common_t *armv4_5 = target->arch_info;
1260 xscale_common_t *xscale = armv4_5->arch_info;
1261
1262 LOG_DEBUG("target->state: %s", target_state_strings[target->state]);
1263
1264 if (target->state == TARGET_HALTED)
1265 {
1266 LOG_WARNING("target was already halted");
1267 return ERROR_OK;
1268 }
1269 else if (target->state == TARGET_UNKNOWN)
1270 {
 1271 /* this must not happen for an XScale target */
1272 LOG_ERROR("target was in unknown state when halt was requested");
1273 return ERROR_TARGET_INVALID;
1274 }
1275 else if (target->state == TARGET_RESET)
1276 {
1277 LOG_DEBUG("target->state == TARGET_RESET");
1278 }
1279 else
1280 {
1281 /* assert external dbg break */
1282 xscale->external_debug_break = 1;
1283 xscale_read_dcsr(target);
1284
1285 target->debug_reason = DBG_REASON_DBGRQ;
1286 }
1287
1288 return ERROR_OK;
1289 }
1290
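/* Single-stepping is emulated with a hardware breakpoint: program IBCR0 to
 * break on next_pc, temporarily unsetting any regular breakpoint that
 * currently occupies it.
 */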
1291 int xscale_enable_single_step(struct target_s *target, u32 next_pc)
1292 {
1293 armv4_5_common_t *armv4_5 = target->arch_info;
1294 xscale_common_t *xscale= armv4_5->arch_info;
1295 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1296
1297 if (xscale->ibcr0_used)
1298 {
1299 breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1300
1301 if (ibcr0_bp)
1302 {
1303 xscale_unset_breakpoint(target, ibcr0_bp);
1304 }
1305 else
1306 {
1307 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1308 exit(-1);
1309 }
1310 }
1311
1312 xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1313
1314 return ERROR_OK;
1315 }
1316
1317 int xscale_disable_single_step(struct target_s *target)
1318 {
1319 armv4_5_common_t *armv4_5 = target->arch_info;
1320 xscale_common_t *xscale= armv4_5->arch_info;
1321 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1322
1323 xscale_set_reg_u32(ibcr0, 0x0);
1324
1325 return ERROR_OK;
1326 }
1327
1328 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution)
1329 {
1330 armv4_5_common_t *armv4_5 = target->arch_info;
1331 xscale_common_t *xscale= armv4_5->arch_info;
1332 breakpoint_t *breakpoint = target->breakpoints;
1333
1334 u32 current_pc;
1335
1336 int retval;
1337 int i;
1338
1339 LOG_DEBUG("-");
1340
1341 if (target->state != TARGET_HALTED)
1342 {
1343 LOG_WARNING("target not halted");
1344 return ERROR_TARGET_NOT_HALTED;
1345 }
1346
1347 if (!debug_execution)
1348 {
1349 target_free_all_working_areas(target);
1350 }
1351
1352 /* update vector tables */
1353 if ((retval=xscale_update_vectors(target))!=ERROR_OK)
1354 return retval;
1355
1356 /* current = 1: continue on current pc, otherwise continue at <address> */
1357 if (!current)
1358 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1359
1360 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1361
1362 /* if we're at the reset vector, we have to simulate the branch */
1363 if (current_pc == 0x0)
1364 {
1365 arm_simulate_step(target, NULL);
1366 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1367 }
1368
1369 /* the front-end may request us not to handle breakpoints */
1370 if (handle_breakpoints)
1371 {
1372 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1373 {
1374 u32 next_pc;
1375
1376 /* there's a breakpoint at the current PC, we have to step over it */
1377 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1378 xscale_unset_breakpoint(target, breakpoint);
1379
1380 /* calculate PC of next instruction */
1381 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1382 {
1383 u32 current_opcode;
1384 target_read_u32(target, current_pc, &current_opcode);
1385 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1386 }
1387
1388 LOG_DEBUG("enable single-step");
1389 xscale_enable_single_step(target, next_pc);
1390
1391 /* restore banked registers */
1392 xscale_restore_context(target);
1393
1394 /* send resume request (command 0x30 or 0x31)
1395 * clean the trace buffer if it is to be enabled (0x62) */
1396 if (xscale->trace.buffer_enabled)
1397 {
1398 xscale_send_u32(target, 0x62);
1399 xscale_send_u32(target, 0x31);
1400 }
1401 else
1402 xscale_send_u32(target, 0x30);
1403
1404 /* send CPSR */
1405 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1406 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1407
1408 for (i = 7; i >= 0; i--)
1409 {
1410 /* send register */
1411 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1412 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1413 }
1414
1415 /* send PC */
1416 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1417 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1418
1419 /* wait for and process debug entry */
1420 xscale_debug_entry(target);
1421
1422 LOG_DEBUG("disable single-step");
1423 xscale_disable_single_step(target);
1424
1425 LOG_DEBUG("set breakpoint at 0x%8.8x", breakpoint->address);
1426 xscale_set_breakpoint(target, breakpoint);
1427 }
1428 }
1429
1430 /* enable any pending breakpoints and watchpoints */
1431 xscale_enable_breakpoints(target);
1432 xscale_enable_watchpoints(target);
1433
1434 /* restore banked registers */
1435 xscale_restore_context(target);
1436
1437 /* send resume request (command 0x30 or 0x31)
1438 * clean the trace buffer if it is to be enabled (0x62) */
1439 if (xscale->trace.buffer_enabled)
1440 {
1441 xscale_send_u32(target, 0x62);
1442 xscale_send_u32(target, 0x31);
1443 }
1444 else
1445 xscale_send_u32(target, 0x30);
1446
1447 /* send CPSR */
1448 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1449 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1450
1451 for (i = 7; i >= 0; i--)
1452 {
1453 /* send register */
1454 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1455 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1456 }
1457
1458 /* send PC */
1459 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1460 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1461
1462 target->debug_reason = DBG_REASON_NOTHALTED;
1463
1464 if (!debug_execution)
1465 {
1466 /* registers are now invalid */
1467 armv4_5_invalidate_core_regs(target);
1468 target->state = TARGET_RUNNING;
1469 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1470 }
1471 else
1472 {
1473 target->state = TARGET_DEBUG_RUNNING;
1474 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1475 }
1476
1477 LOG_DEBUG("target resumed");
1478
1479 xscale->handler_running = 1;
1480
1481 return ERROR_OK;
1482 }
1483
1484 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints)
1485 {
1486 armv4_5_common_t *armv4_5 = target->arch_info;
1487 xscale_common_t *xscale = armv4_5->arch_info;
1488 breakpoint_t *breakpoint = target->breakpoints;
1489
1490 u32 current_pc, next_pc;
1491 int i;
1492 int retval;
1493
1494 if (target->state != TARGET_HALTED)
1495 {
1496 LOG_WARNING("target not halted");
1497 return ERROR_TARGET_NOT_HALTED;
1498 }
1499
1500 /* current = 1: continue on current pc, otherwise continue at <address> */
1501 if (!current)
1502 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1503
1504 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1505
1506 /* if we're at the reset vector, we have to simulate the step */
1507 if (current_pc == 0x0)
1508 {
1509 arm_simulate_step(target, NULL);
1510 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1511
1512 target->debug_reason = DBG_REASON_SINGLESTEP;
1513 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1514
1515 return ERROR_OK;
1516 }
1517
1518 /* the front-end may request us not to handle breakpoints */
1519 if (handle_breakpoints)
1520 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1521 {
1522 xscale_unset_breakpoint(target, breakpoint);
1523 }
1524
1525 target->debug_reason = DBG_REASON_SINGLESTEP;
1526
1527 /* calculate PC of next instruction */
1528 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1529 {
1530 u32 current_opcode;
1531 target_read_u32(target, current_pc, &current_opcode);
1532 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1533 }
1534
1535 LOG_DEBUG("enable single-step");
1536 xscale_enable_single_step(target, next_pc);
1537
1538 /* restore banked registers */
1539 xscale_restore_context(target);
1540
1541 /* send resume request (command 0x30 or 0x31)
1542 * clean the trace buffer if it is to be enabled (0x62) */
1543 if (xscale->trace.buffer_enabled)
1544 {
1545 xscale_send_u32(target, 0x62);
1546 xscale_send_u32(target, 0x31);
1547 }
1548 else
1549 xscale_send_u32(target, 0x30);
1550
1551 /* send CPSR */
1552 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1553 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1554
1555 for (i = 7; i >= 0; i--)
1556 {
1557 /* send register */
1558 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1559 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1560 }
1561
1562 /* send PC */
1563 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1564 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1565
1566 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1567
1568 /* registers are now invalid */
1569 armv4_5_invalidate_core_regs(target);
1570
1571 /* wait for and process debug entry */
1572 xscale_debug_entry(target);
1573
1574 LOG_DEBUG("disable single-step");
1575 xscale_disable_single_step(target);
1576
1577 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1578
1579 if (breakpoint)
1580 {
1581 xscale_set_breakpoint(target, breakpoint);
1582 }
1583
1584 LOG_DEBUG("target stepped");
1585
1586 return ERROR_OK;
1587
1588 }
1589
1590 int xscale_assert_reset(target_t *target)
1591 {
1592 armv4_5_common_t *armv4_5 = target->arch_info;
1593 xscale_common_t *xscale = armv4_5->arch_info;
1594
1595 LOG_DEBUG("target->state: %s", target_state_strings[target->state]);
1596
1597 /* TRST every time. We want to be able to support daemon_startup attach */
1598 jtag_add_reset(1, 0);
1599 jtag_add_sleep(5000);
1600 jtag_add_reset(0, 0);
1601 jtag_add_sleep(5000);
1602 jtag_execute_queue();
1603
1604
1605
1606 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
 1607 * end up in T-L-R, which would reset JTAG)
1608 */
1609 jtag_add_end_state(TAP_RTI);
1610 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
1611
1612 /* set Hold reset, Halt mode and Trap Reset */
1613 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1614 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1615 xscale_write_dcsr(target, 1, 0);
1616
1617 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1618 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, 0x7f);
1619 jtag_execute_queue();
1620
1621 /* assert reset */
1622 jtag_add_reset(0, 1);
1623
1624 /* sleep 1ms, to be sure we fulfill any requirements */
1625 jtag_add_sleep(1000);
1626 jtag_execute_queue();
1627
1628 target->state = TARGET_RESET;
1629
1630 return ERROR_OK;
1631 }
1632
1633 int xscale_deassert_reset(target_t *target)
1634 {
1635 armv4_5_common_t *armv4_5 = target->arch_info;
1636 xscale_common_t *xscale = armv4_5->arch_info;
1637
1638 fileio_t debug_handler;
1639 u32 address;
1640 u32 binary_size;
1641
1642 u32 buf_cnt;
1643 int i;
1644 int retval;
1645
1646 breakpoint_t *breakpoint = target->breakpoints;
1647
1648 LOG_DEBUG("-");
1649
1650 xscale->ibcr_available = 2;
1651 xscale->ibcr0_used = 0;
1652 xscale->ibcr1_used = 0;
1653
1654 xscale->dbr_available = 2;
1655 xscale->dbr0_used = 0;
1656 xscale->dbr1_used = 0;
1657
1658 /* mark all hardware breakpoints as unset */
1659 while (breakpoint)
1660 {
1661 if (breakpoint->type == BKPT_HARD)
1662 {
1663 breakpoint->set = 0;
1664 }
1665 breakpoint = breakpoint->next;
1666 }
1667
1668 if (!xscale->handler_installed)
1669 {
1670 /* release SRST */
1671 jtag_add_reset(0, 0);
1672
1673 /* wait 300ms; 150 and 100ms were not enough */
1674 jtag_add_sleep(300*1000);
1675
1676 jtag_add_runtest(2030, TAP_RTI);
1677 jtag_execute_queue();
1678
1679 /* set Hold reset, Halt mode and Trap Reset */
1680 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1681 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1682 xscale_write_dcsr(target, 1, 0);
1683
1684 /* Load debug handler */
1685 if (fileio_open(&debug_handler, "xscale/debug_handler.bin", FILEIO_READ, FILEIO_BINARY) != ERROR_OK)
1686 {
1687 return ERROR_OK;
1688 }
1689
1690 if ((binary_size = debug_handler.size) % 4)
1691 {
1692 LOG_ERROR("debug_handler.bin: size not a multiple of 4");
1693 exit(-1);
1694 }
1695
1696 if (binary_size > 0x800)
1697 {
1698 LOG_ERROR("debug_handler.bin: larger than 2kb");
1699 exit(-1);
1700 }
1701
1702 binary_size = CEIL(binary_size, 32) * 32;
1703
1704 address = xscale->handler_address;
1705 while (binary_size > 0)
1706 {
1707 u32 cache_line[8];
1708 u8 buffer[32];
1709
 1710 if ((retval = fileio_read(&debug_handler, 32, buffer, &buf_cnt)) != ERROR_OK)
 1711 {
 1712 LOG_ERROR("reading debug handler failed");
 fileio_close(&debug_handler);
 return retval;
 1713 }
1714
1715 for (i = 0; i < buf_cnt; i += 4)
1716 {
1717 /* convert LE buffer to host-endian u32 */
1718 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1719 }
1720
1721 for (; buf_cnt < 32; buf_cnt += 4)
1722 {
1723 cache_line[buf_cnt / 4] = 0xe1a08008;
1724 }
1725
1726 /* only load addresses other than the reset vectors */
1727 if ((address % 0x400) != 0x0)
1728 {
1729 xscale_load_ic(target, 1, address, cache_line);
1730 }
1731
1732 address += buf_cnt;
1733 binary_size -= buf_cnt;
 1734 }
1735
1736 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
1737 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
1738
1739 jtag_add_runtest(30, TAP_RTI);
1740
1741 jtag_add_sleep(100000);
1742
1743 /* set Hold reset, Halt mode and Trap Reset */
1744 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1745 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1746 xscale_write_dcsr(target, 1, 0);
1747
1748 /* clear Hold reset to let the target run (should enter debug handler) */
1749 xscale_write_dcsr(target, 0, 1);
1750 target->state = TARGET_RUNNING;
1751
1752 if ((target->reset_mode != RESET_HALT) && (target->reset_mode != RESET_INIT))
1753 {
1754 jtag_add_sleep(10000);
1755
1756 /* we should have entered debug now */
1757 xscale_debug_entry(target);
1758 target->state = TARGET_HALTED;
1759
1760 /* resume the target */
1761 xscale_resume(target, 1, 0x0, 1, 0);
1762 }
1763
1764 fileio_close(&debug_handler);
1765 }
1766 else
1767 {
1768 jtag_add_reset(0, 0);
1769 }
1770
1771
1772 return ERROR_OK;
1773 }
1774
1775 int xscale_soft_reset_halt(struct target_s *target)
1776 {
1777
1778 return ERROR_OK;
1779 }
1780
1781 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode)
1782 {
1783
1784 return ERROR_OK;
1785 }
1786
1787 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value)
1788 {
1789
1790 return ERROR_OK;
1791 }
1792
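/* Fetch the banked registers of every privileged mode (FIQ, IRQ, SVC, ABT,
 * UND, SYS) whose cache entries are not valid from the debug handler.
 */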
1793 int xscale_full_context(target_t *target)
1794 {
1795 armv4_5_common_t *armv4_5 = target->arch_info;
1796
1797 u32 *buffer;
1798
1799 int i, j;
1800
1801 LOG_DEBUG("-");
1802
1803 if (target->state != TARGET_HALTED)
1804 {
1805 LOG_WARNING("target not halted");
1806 return ERROR_TARGET_NOT_HALTED;
1807 }
1808
1809 buffer = malloc(4 * 8);
1810
1811 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1812 * we can't enter User mode on an XScale (unpredictable),
1813 * but User shares registers with SYS
1814 */
1815 for(i = 1; i < 7; i++)
1816 {
1817 int valid = 1;
1818
1819 /* check if there are invalid registers in the current mode
1820 */
1821 for (j = 0; j <= 16; j++)
1822 {
1823 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1824 valid = 0;
1825 }
1826
1827 if (!valid)
1828 {
1829 u32 tmp_cpsr;
1830
1831 /* request banked registers */
1832 xscale_send_u32(target, 0x0);
1833
1834 tmp_cpsr = 0x0;
1835 tmp_cpsr |= armv4_5_number_to_mode(i);
1836 tmp_cpsr |= 0xc0; /* I/F bits */
1837
1838 /* send CPSR for desired mode */
1839 xscale_send_u32(target, tmp_cpsr);
1840
1841 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1842 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1843 {
1844 xscale_receive(target, buffer, 8);
 1845 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
1846 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1847 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1848 }
1849 else
1850 {
1851 xscale_receive(target, buffer, 7);
1852 }
1853
1854 /* move data from buffer to register cache */
1855 for (j = 8; j <= 14; j++)
1856 {
1857 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1858 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1859 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1860 }
1861 }
1862 }
1863
1864 free(buffer);
1865
1866 return ERROR_OK;
1867 }
1868
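/* Write dirty banked registers (and, outside USR/SYS, the SPSR) of every mode
 * back to the debug handler so they are restored before the target resumes.
 */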
1869 int xscale_restore_context(target_t *target)
1870 {
1871 armv4_5_common_t *armv4_5 = target->arch_info;
1872
1873 int i, j;
1874
1875 LOG_DEBUG("-");
1876
1877 if (target->state != TARGET_HALTED)
1878 {
1879 LOG_WARNING("target not halted");
1880 return ERROR_TARGET_NOT_HALTED;
1881 }
1882
1883 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1884 * we can't enter User mode on an XScale (unpredictable),
1885 * but User shares registers with SYS
1886 */
1887 for(i = 1; i < 7; i++)
1888 {
1889 int dirty = 0;
1890
1891 /* check if there are invalid registers in the current mode
1892 */
1893 for (j = 8; j <= 14; j++)
1894 {
1895 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1896 dirty = 1;
1897 }
1898
1899 /* if not USR/SYS, check if the SPSR needs to be written */
1900 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1901 {
1902 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1903 dirty = 1;
1904 }
1905
1906 if (dirty)
1907 {
1908 u32 tmp_cpsr;
1909
1910 /* send banked registers */
1911 xscale_send_u32(target, 0x1);
1912
1913 tmp_cpsr = 0x0;
1914 tmp_cpsr |= armv4_5_number_to_mode(i);
1915 tmp_cpsr |= 0xc0; /* I/F bits */
1916
1917 /* send CPSR for desired mode */
1918 xscale_send_u32(target, tmp_cpsr);
1919
1920 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1921 for (j = 8; j <= 14; j++)
1922 {
1923 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
1924 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1925 }
1926
1927 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1928 {
1929 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
1930 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1931 }
1932 }
1933 }
1934
1935 return ERROR_OK;
1936 }
1937
1938 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1939 {
1940 armv4_5_common_t *armv4_5 = target->arch_info;
1941 xscale_common_t *xscale = armv4_5->arch_info;
1942 u32 *buf32;
1943 int i;
1944 int retval;
1945
1946 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1947
1948 if (target->state != TARGET_HALTED)
1949 {
1950 LOG_WARNING("target not halted");
1951 return ERROR_TARGET_NOT_HALTED;
1952 }
1953
1954 /* sanitize arguments */
1955 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1956 return ERROR_INVALID_ARGUMENTS;
1957
1958 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1959 return ERROR_TARGET_UNALIGNED_ACCESS;
1960
1961 /* send memory read request (command 0x1n, n: access size) */
1962 if ((retval=xscale_send_u32(target, 0x10 | size))!=ERROR_OK)
1963 return retval;
1964
1965 /* send base address for read request */
1966 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
1967 return retval;
1968
1969 /* send number of requested data words */
1970 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
1971 return retval;
1972
1973 /* receive data from target (count times 32-bit words in host endianness) */
1974 buf32 = malloc(4 * count);
1975 if ((retval=xscale_receive(target, buf32, count))!=ERROR_OK)
1976 { free(buf32); return retval; } /* don't leak buf32 on error */
1977
1978 /* extract data from host-endian buffer into byte stream */
1979 for (i = 0; i < count; i++)
1980 {
1981 switch (size)
1982 {
1983 case 4:
1984 target_buffer_set_u32(target, buffer, buf32[i]);
1985 buffer += 4;
1986 break;
1987 case 2:
1988 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1989 buffer += 2;
1990 break;
1991 case 1:
1992 *buffer++ = buf32[i] & 0xff;
1993 break;
1994 default:
1995 LOG_ERROR("should never get here");
1996 exit(-1);
1997 }
1998 }
1999
2000 free(buf32);
2001
2002 /* examine DCSR, to see if Sticky Abort (SA) got set */
2003 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
2004 return retval;
2005 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2006 {
2007 /* clear SA bit */
2008 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
2009 return retval;
2010
2011 return ERROR_TARGET_DATA_ABORT;
2012 }
2013
2014 return ERROR_OK;
2015 }
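
/* Illustrative sketch only (hypothetical helper, kept inside #if 0): a typical
 * use of the debug handler read protocol implemented above - one aligned
 * 4-byte access, then a conversion from target endianness.
 */
#if 0
static int example_read_word(target_t *target, u32 address, u32 *value)
{
	u8 buf[4];
	int retval;

	if ((retval = xscale_read_memory(target, address, 4, 1, buf)) != ERROR_OK)
		return retval;

	*value = target_buffer_get_u32(target, buf);
	return ERROR_OK;
}
#endif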
2016
2017 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
2018 {
2019 armv4_5_common_t *armv4_5 = target->arch_info;
2020 xscale_common_t *xscale = armv4_5->arch_info;
2021 int retval;
2022
2023 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
2024
2025 if (target->state != TARGET_HALTED)
2026 {
2027 LOG_WARNING("target not halted");
2028 return ERROR_TARGET_NOT_HALTED;
2029 }
2030
2031 /* sanitize arguments */
2032 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
2033 return ERROR_INVALID_ARGUMENTS;
2034
2035 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2036 return ERROR_TARGET_UNALIGNED_ACCESS;
2037
2038 /* send memory write request (command 0x2n, n: access size) */
2039 if ((retval=xscale_send_u32(target, 0x20 | size))!=ERROR_OK)
2040 return retval;
2041
2042 /* send base address for write request */
2043 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
2044 return retval;
2045
2046 /* send number of requested data words to be written */
2047 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
2048 return retval;
2049
2050 /* extract data from the (target-endian) byte stream and send it to the target */
2051 #if 0
2052 for (i = 0; i < count; i++)
2053 {
2054 switch (size)
2055 {
2056 case 4:
2057 value = target_buffer_get_u32(target, buffer);
2058 xscale_send_u32(target, value);
2059 buffer += 4;
2060 break;
2061 case 2:
2062 value = target_buffer_get_u16(target, buffer);
2063 xscale_send_u32(target, value);
2064 buffer += 2;
2065 break;
2066 case 1:
2067 value = *buffer;
2068 xscale_send_u32(target, value);
2069 buffer += 1;
2070 break;
2071 default:
2072 LOG_ERROR("should never get here");
2073 exit(-1);
2074 }
2075 }
2076 #endif
2077 if ((retval=xscale_send(target, buffer, count, size))!=ERROR_OK)
2078 return retval;
2079
2080 /* examine DCSR, to see if Sticky Abort (SA) got set */
2081 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
2082 return retval;
2083 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2084 {
2085 /* clear SA bit */
2086 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
2087 return retval;
2088
2089 return ERROR_TARGET_DATA_ABORT;
2090 }
2091
2092 return ERROR_OK;
2093 }
2094
2095 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer)
2096 {
2097 return xscale_write_memory(target, address, 4, count, buffer);
2098 }
2099
2100 int xscale_checksum_memory(struct target_s *target, u32 address, u32 count, u32* checksum)
2101 {
2102 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2103 }
2104
2105 u32 xscale_get_ttb(target_t *target)
2106 {
2107 armv4_5_common_t *armv4_5 = target->arch_info;
2108 xscale_common_t *xscale = armv4_5->arch_info;
2109 u32 ttb;
2110
2111 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2112 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2113
2114 return ttb;
2115 }
2116
2117 void xscale_disable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2118 {
2119 armv4_5_common_t *armv4_5 = target->arch_info;
2120 xscale_common_t *xscale = armv4_5->arch_info;
2121 u32 cp15_control;
2122
2123 /* read cp15 control register */
2124 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2125 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2126
2127 if (mmu)
2128 cp15_control &= ~0x1U;
2129
2130 if (d_u_cache)
2131 {
2132 /* clean DCache */
2133 xscale_send_u32(target, 0x50);
2134 xscale_send_u32(target, xscale->cache_clean_address);
2135
2136 /* invalidate DCache */
2137 xscale_send_u32(target, 0x51);
2138
2139 cp15_control &= ~0x4U;
2140 }
2141
2142 if (i_cache)
2143 {
2144 /* invalidate ICache */
2145 xscale_send_u32(target, 0x52);
2146 cp15_control &= ~0x1000U;
2147 }
2148
2149 /* write new cp15 control register */
2150 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2151
2152 /* execute cpwait to ensure outstanding operations complete */
2153 xscale_send_u32(target, 0x53);
2154 }
2155
2156 void xscale_enable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2157 {
2158 armv4_5_common_t *armv4_5 = target->arch_info;
2159 xscale_common_t *xscale = armv4_5->arch_info;
2160 u32 cp15_control;
2161
2162 /* read cp15 control register */
2163 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2164 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2165
2166 if (mmu)
2167 cp15_control |= 0x1U;
2168
2169 if (d_u_cache)
2170 cp15_control |= 0x4U;
2171
2172 if (i_cache)
2173 cp15_control |= 0x1000U;
2174
2175 /* write new cp15 control register */
2176 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2177
2178 /* execute cpwait to ensure outstanding operations complete */
2179 xscale_send_u32(target, 0x53);
2180 }
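
/* For reference (names are hypothetical, kept inside #if 0): the cp15 control
 * register bits toggled by the two helpers above, written out as masks.
 */
#if 0
#define EXAMPLE_XSCALE_CTRL_MMU    0x0001U	/* bit 0: MMU enable */
#define EXAMPLE_XSCALE_CTRL_DCACHE 0x0004U	/* bit 2: data/unified cache enable */
#define EXAMPLE_XSCALE_CTRL_ICACHE 0x1000U	/* bit 12: instruction cache enable */
#endif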
2181
2182 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2183 {
2184 armv4_5_common_t *armv4_5 = target->arch_info;
2185 xscale_common_t *xscale = armv4_5->arch_info;
2186
2187 if (target->state != TARGET_HALTED)
2188 {
2189 LOG_WARNING("target not halted");
2190 return ERROR_TARGET_NOT_HALTED;
2191 }
2192
2193 if (xscale->force_hw_bkpts)
2194 breakpoint->type = BKPT_HARD;
2195
2196 if (breakpoint->set)
2197 {
2198 LOG_WARNING("breakpoint already set");
2199 return ERROR_OK;
2200 }
2201
2202 if (breakpoint->type == BKPT_HARD)
2203 {
2204 u32 value = breakpoint->address | 1;
2205 if (!xscale->ibcr0_used)
2206 {
2207 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2208 xscale->ibcr0_used = 1;
2209 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2210 }
2211 else if (!xscale->ibcr1_used)
2212 {
2213 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2214 xscale->ibcr1_used = 1;
2215 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2216 }
2217 else
2218 {
2219 LOG_ERROR("BUG: no hardware comparator available");
2220 return ERROR_OK;
2221 }
2222 }
2223 else if (breakpoint->type == BKPT_SOFT)
2224 {
2225 if (breakpoint->length == 4)
2226 {
2227 /* keep the original instruction in target endianness */
2228 target->type->read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2229 /* write the ARM breakpoint instruction (xscale->arm_bkpt is host endian) */
2230 target_write_u32(target, breakpoint->address, xscale->arm_bkpt);
2231 }
2232 else
2233 {
2234 /* keep the original instruction in target endianness */
2235 target->type->read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2236 /* write the Thumb breakpoint instruction (xscale->thumb_bkpt is host endian) */
2237 target_write_u16(target, breakpoint->address, xscale->thumb_bkpt);
2238 }
2239 breakpoint->set = 1;
2240 }
2241
2242 return ERROR_OK;
2243
2244 }
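
/* Illustrative sketch only (hypothetical helper, kept inside #if 0): the value
 * programmed into IBCR0/IBCR1 above is the breakpoint address with bit 0 set
 * as the enable bit.
 */
#if 0
static u32 example_ibcr_value(u32 breakpoint_address)
{
	return breakpoint_address | 1;	/* bit 0 enables the comparator */
}
#endif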
2245
2246 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2247 {
2248 armv4_5_common_t *armv4_5 = target->arch_info;
2249 xscale_common_t *xscale = armv4_5->arch_info;
2250
2251 if (target->state != TARGET_HALTED)
2252 {
2253 LOG_WARNING("target not halted");
2254 return ERROR_TARGET_NOT_HALTED;
2255 }
2256
2257 if (xscale->force_hw_bkpts)
2258 {
2259 LOG_DEBUG("forcing use of hardware breakpoint at address 0x%8.8x", breakpoint->address);
2260 breakpoint->type = BKPT_HARD;
2261 }
2262
2263 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2264 {
2265 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2266 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2267 }
2268 else if (breakpoint->type == BKPT_HARD)
2269 {
2270 xscale->ibcr_available--; /* only hardware breakpoints consume a comparator */
2271 }
2272
2273 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2274 {
2275 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2276 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2277 }
2278
2279 return ERROR_OK;
2280 }
2281
2282 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2283 {
2284 armv4_5_common_t *armv4_5 = target->arch_info;
2285 xscale_common_t *xscale = armv4_5->arch_info;
2286
2287 if (target->state != TARGET_HALTED)
2288 {
2289 LOG_WARNING("target not halted");
2290 return ERROR_TARGET_NOT_HALTED;
2291 }
2292
2293 if (!breakpoint->set)
2294 {
2295 LOG_WARNING("breakpoint not set");
2296 return ERROR_OK;
2297 }
2298
2299 if (breakpoint->type == BKPT_HARD)
2300 {
2301 if (breakpoint->set == 1)
2302 {
2303 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2304 xscale->ibcr0_used = 0;
2305 }
2306 else if (breakpoint->set == 2)
2307 {
2308 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2309 xscale->ibcr1_used = 0;
2310 }
2311 breakpoint->set = 0;
2312 }
2313 else
2314 {
2315 /* restore original instruction (kept in target endianness) */
2316 if (breakpoint->length == 4)
2317 {
2318 target->type->write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2319 }
2320 else
2321 {
2322 target->type->write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2323 }
2324 breakpoint->set = 0;
2325 }
2326
2327 return ERROR_OK;
2328 }
2329
2330 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2331 {
2332 armv4_5_common_t *armv4_5 = target->arch_info;
2333 xscale_common_t *xscale = armv4_5->arch_info;
2334
2335 if (target->state != TARGET_HALTED)
2336 {
2337 LOG_WARNING("target not halted");
2338 return ERROR_TARGET_NOT_HALTED;
2339 }
2340
2341 if (breakpoint->set)
2342 {
2343 xscale_unset_breakpoint(target, breakpoint);
2344 }
2345
2346 if (breakpoint->type == BKPT_HARD)
2347 xscale->ibcr_available++;
2348
2349 return ERROR_OK;
2350 }
2351
2352 int xscale_set_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2353 {
2354 armv4_5_common_t *armv4_5 = target->arch_info;
2355 xscale_common_t *xscale = armv4_5->arch_info;
2356 u8 enable=0;
2357 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2358 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2359
2360 if (target->state != TARGET_HALTED)
2361 {
2362 LOG_WARNING("target not halted");
2363 return ERROR_TARGET_NOT_HALTED;
2364 }
2365
2366 xscale_get_reg(dbcon);
2367
2368 switch (watchpoint->rw)
2369 {
2370 case WPT_READ:
2371 enable = 0x3;
2372 break;
2373 case WPT_ACCESS:
2374 enable = 0x2;
2375 break;
2376 case WPT_WRITE:
2377 enable = 0x1;
2378 break;
2379 default:
2380 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2381 }
2382
2383 if (!xscale->dbr0_used)
2384 {
2385 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2386 dbcon_value |= enable;
2387 xscale_set_reg_u32(dbcon, dbcon_value);
2388 watchpoint->set = 1;
2389 xscale->dbr0_used = 1;
2390 }
2391 else if (!xscale->dbr1_used)
2392 {
2393 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2394 dbcon_value |= enable << 2;
2395 xscale_set_reg_u32(dbcon, dbcon_value);
2396 watchpoint->set = 2;
2397 xscale->dbr1_used = 1;
2398 }
2399 else
2400 {
2401 LOG_ERROR("BUG: no hardware comparator available");
2402 return ERROR_OK;
2403 }
2404
2405 return ERROR_OK;
2406 }
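
/* Illustrative sketch only (hypothetical helper, kept inside #if 0): the DBCON
 * encoding used above - a two-bit enable field per data breakpoint register,
 * bits [1:0] for DBR0 and bits [3:2] for DBR1, with 0x1 = write, 0x2 = any
 * access, 0x3 = read.
 */
#if 0
static u32 example_dbcon_value(u32 dbcon, u8 enable, int second_comparator)
{
	if (second_comparator)
		return dbcon | (enable << 2);	/* DBR1 field, bits [3:2] */
	return dbcon | enable;			/* DBR0 field, bits [1:0] */
}
#endif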
2407
2408 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2409 {
2410 armv4_5_common_t *armv4_5 = target->arch_info;
2411 xscale_common_t *xscale = armv4_5->arch_info;
2412
2413 if (target->state != TARGET_HALTED)
2414 {
2415 LOG_WARNING("target not halted");
2416 return ERROR_TARGET_NOT_HALTED;
2417 }
2418
2419 if (xscale->dbr_available < 1)
2420 {
2421 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2422 }
2423
2424 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2425 {
2426 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2427 }
2428
2429 xscale->dbr_available--;
2430
2431 return ERROR_OK;
2432 }
2433
2434 int xscale_unset_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2435 {
2436 armv4_5_common_t *armv4_5 = target->arch_info;
2437 xscale_common_t *xscale = armv4_5->arch_info;
2438 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2439 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2440
2441 if (target->state != TARGET_HALTED)
2442 {
2443 LOG_WARNING("target not halted");
2444 return ERROR_TARGET_NOT_HALTED;
2445 }
2446
2447 if (!watchpoint->set)
2448 {
2449 LOG_WARNING("breakpoint not set");
2450 return ERROR_OK;
2451 }
2452
2453 if (watchpoint->set == 1)
2454 {
2455 dbcon_value &= ~0x3;
2456 xscale_set_reg_u32(dbcon, dbcon_value);
2457 xscale->dbr0_used = 0;
2458 }
2459 else if (watchpoint->set == 2)
2460 {
2461 dbcon_value &= ~0xc;
2462 xscale_set_reg_u32(dbcon, dbcon_value);
2463 xscale->dbr1_used = 0;
2464 }
2465 watchpoint->set = 0;
2466
2467 return ERROR_OK;
2468 }
2469
2470 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2471 {
2472 armv4_5_common_t *armv4_5 = target->arch_info;
2473 xscale_common_t *xscale = armv4_5->arch_info;
2474
2475 if (target->state != TARGET_HALTED)
2476 {
2477 LOG_WARNING("target not halted");
2478 return ERROR_TARGET_NOT_HALTED;
2479 }
2480
2481 if (watchpoint->set)
2482 {
2483 xscale_unset_watchpoint(target, watchpoint);
2484 }
2485
2486 xscale->dbr_available++;
2487
2488 return ERROR_OK;
2489 }
2490
2491 void xscale_enable_watchpoints(struct target_s *target)
2492 {
2493 watchpoint_t *watchpoint = target->watchpoints;
2494
2495 while (watchpoint)
2496 {
2497 if (watchpoint->set == 0)
2498 xscale_set_watchpoint(target, watchpoint);
2499 watchpoint = watchpoint->next;
2500 }
2501 }
2502
2503 void xscale_enable_breakpoints(struct target_s *target)
2504 {
2505 breakpoint_t *breakpoint = target->breakpoints;
2506
2507 /* set any pending breakpoints */
2508 while (breakpoint)
2509 {
2510 if (breakpoint->set == 0)
2511 xscale_set_breakpoint(target, breakpoint);
2512 breakpoint = breakpoint->next;
2513 }
2514 }
2515
2516 int xscale_get_reg(reg_t *reg)
2517 {
2518 xscale_reg_t *arch_info = reg->arch_info;
2519 target_t *target = arch_info->target;
2520 armv4_5_common_t *armv4_5 = target->arch_info;
2521 xscale_common_t *xscale = armv4_5->arch_info;
2522
2523 /* DCSR, TX and RX are accessible via JTAG */
2524 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2525 {
2526 return xscale_read_dcsr(arch_info->target);
2527 }
2528 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2529 {
2530 /* 1 = consume register content */
2531 return xscale_read_tx(arch_info->target, 1);
2532 }
2533 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2534 {
2535 /* can't read from RX register (host -> debug handler) */
2536 return ERROR_OK;
2537 }
2538 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2539 {
2540 /* can't (explicitly) read from TXRXCTRL register */
2541 return ERROR_OK;
2542 }
2543 else /* Other DBG registers have to be transferred by the debug handler */
2544 {
2545 /* send CP read request (command 0x40) */
2546 xscale_send_u32(target, 0x40);
2547
2548 /* send CP register number */
2549 xscale_send_u32(target, arch_info->dbg_handler_number);
2550
2551 /* read register value */
2552 xscale_read_tx(target, 1);
2553 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2554
2555 reg->dirty = 0;
2556 reg->valid = 1;
2557 }
2558
2559 return ERROR_OK;
2560 }
2561
2562 int xscale_set_reg(reg_t *reg, u8* buf)
2563 {
2564 xscale_reg_t *arch_info = reg->arch_info;
2565 target_t *target = arch_info->target;
2566 armv4_5_common_t *armv4_5 = target->arch_info;
2567 xscale_common_t *xscale = armv4_5->arch_info;
2568 u32 value = buf_get_u32(buf, 0, 32);
2569
2570 /* DCSR, TX and RX are accessible via JTAG */
2571 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2572 {
2573 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2574 return xscale_write_dcsr(arch_info->target, -1, -1);
2575 }
2576 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2577 {
2578 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2579 return xscale_write_rx(arch_info->target);
2580 }
2581 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2582 {
2583 /* can't write to TX register (debug-handler -> host) */
2584 return ERROR_OK;
2585 }
2586 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2587 {
2588 /* can't (explicitly) write to TXRXCTRL register */
2589 return ERROR_OK;
2590 }
2591 else /* Other DBG registers have to be transferred by the debug handler */
2592 {
2593 /* send CP write request (command 0x41) */
2594 xscale_send_u32(target, 0x41);
2595
2596 /* send CP register number */
2597 xscale_send_u32(target, arch_info->dbg_handler_number);
2598
2599 /* send CP register value */
2600 xscale_send_u32(target, value);
2601 buf_set_u32(reg->value, 0, 32, value);
2602 }
2603
2604 return ERROR_OK;
2605 }
2606
2607 /* convenience wrapper to access XScale specific registers */
2608 int xscale_set_reg_u32(reg_t *reg, u32 value)
2609 {
2610 u8 buf[4];
2611
2612 buf_set_u32(buf, 0, 32, value);
2613
2614 return xscale_set_reg(reg, buf);
2615 }
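
/* Usage sketch only (kept inside #if 0, surrounding variables assumed): how
 * the wrapper above is typically called elsewhere in this file, e.g. to disarm
 * the first hardware breakpoint comparator.
 */
#if 0
	xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
#endif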
2616
2617 int xscale_write_dcsr_sw(target_t *target, u32 value)
2618 {
2619 /* get pointers to arch-specific information */
2620 armv4_5_common_t *armv4_5 = target->arch_info;
2621 xscale_common_t *xscale = armv4_5->arch_info;
2622 reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2623 xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
2624
2625 /* send CP write request (command 0x41) */
2626 xscale_send_u32(target, 0x41);
2627
2628 /* send CP register number */
2629 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2630
2631 /* send CP register value */
2632 xscale_send_u32(target, value);
2633 buf_set_u32(dcsr->value, 0, 32, value);
2634
2635 return ERROR_OK;
2636 }
2637
2638 int xscale_read_trace(target_t *target)
2639 {
2640 /* get pointers to arch-specific information */
2641 armv4_5_common_t *armv4_5 = target->arch_info;
2642 xscale_common_t *xscale = armv4_5->arch_info;
2643 xscale_trace_data_t **trace_data_p;
2644
2645 /* 258 words from debug handler
2646 * 256 trace buffer entries
2647 * 2 checkpoint addresses
2648 */
2649 u32 trace_buffer[258];
2650 int is_address[256];
2651 int i, j;
2652
2653 if (target->state != TARGET_HALTED)
2654 {
2655 LOG_WARNING("target must be stopped to read trace data");
2656 return ERROR_TARGET_NOT_HALTED;
2657 }
2658
2659 /* send read trace buffer command (command 0x61) */
2660 xscale_send_u32(target, 0x61);
2661
2662 /* receive trace buffer content */
2663 xscale_receive(target, trace_buffer, 258);
2664
2665 /* parse buffer backwards to identify address entries */
2666 for (i = 255; i >= 0; i--)
2667 {
2668 is_address[i] = 0;
2669 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2670 ((trace_buffer[i] & 0xf0) == 0xd0))
2671 {
2672 if (i > 0)
2673 is_address[--i] = 1;
2674 if (i > 0)
2675 is_address[--i] = 1;
2676 if (i > 0)
2677 is_address[--i] = 1;
2678 if (i > 0)
2679 is_address[--i] = 1;
2680 }
2681 }
2682
2683
2684 /* search first non-zero entry */
2685 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2686 ;
2687
2688 if (j == 256)
2689 {
2690 LOG_DEBUG("no trace data collected");
2691 return ERROR_XSCALE_NO_TRACE_DATA;
2692 }
2693
2694 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2695 ;
2696
2697 *trace_data_p = malloc(sizeof(xscale_trace_data_t));
2698 (*trace_data_p)->next = NULL;
2699 (*trace_data_p)->chkpt0 = trace_buffer[256];
2700 (*trace_data_p)->chkpt1 = trace_buffer[257];
2701 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2702 (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
2703 (*trace_data_p)->depth = 256 - j;
2704
2705 for (i = j; i < 256; i++)
2706 {
2707 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2708 if (is_address[i])
2709 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2710 else
2711 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2712 }
2713
2714 return ERROR_OK;
2715 }
2716
2717 int xscale_read_instruction(target_t *target, arm_instruction_t *instruction)
2718 {
2719 /* get pointers to arch-specific information */
2720 armv4_5_common_t *armv4_5 = target->arch_info;
2721 xscale_common_t *xscale = armv4_5->arch_info;
2722 int i;
2723 int section = -1;
2724 u32 size_read;
2725 u32 opcode;
2726 int retval;
2727
2728 if (!xscale->trace.image)
2729 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2730
2731 /* search for the section the current instruction belongs to */
2732 for (i = 0; i < xscale->trace.image->num_sections; i++)
2733 {
2734 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2735 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2736 {
2737 section = i;
2738 break;
2739 }
2740 }
2741
2742 if (section == -1)
2743 {
2744 /* current instruction couldn't be found in the image */
2745 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2746 }
2747
2748 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2749 {
2750 u8 buf[4];
2751 if ((retval = image_read_section(xscale->trace.image, section,
2752 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2753 4, buf, &size_read)) != ERROR_OK)
2754 {
2755 LOG_ERROR("error while reading instruction: %i", retval);
2756 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2757 }
2758 opcode = target_buffer_get_u32(target, buf);
2759 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2760 }
2761 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2762 {
2763 u8 buf[2];
2764 if ((retval = image_read_section(xscale->trace.image, section,
2765 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2766 2, buf, &size_read)) != ERROR_OK)
2767 {
2768 LOG_ERROR("error while reading instruction: %i", retval);
2769 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2770 }
2771 opcode = target_buffer_get_u16(target, buf);
2772 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2773 }
2774 else
2775 {
2776 LOG_ERROR("BUG: unknown core state encountered");
2777 exit(-1);
2778 }
2779
2780 return ERROR_OK;
2781 }
2782
2783 int xscale_branch_address(xscale_trace_data_t *trace_data, int i, u32 *target)
2784 {
2785 /* if there are fewer than four entries prior to the indirect branch message
2786 * we can't extract the address */
2787 if (i < 4)
2788 {
2789 return -1;
2790 }
2791
2792 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2793 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2794
2795 return 0;
2796 }
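
/* Worked example for the reconstruction above: the four address bytes precede
 * the indirect branch message most significant byte first, so for entries
 * (i-4 .. i-1) = 0xAA, 0xBB, 0xCC, 0xDD the function returns
 * 0xDD | (0xCC << 8) | (0xBB << 16) | (0xAA << 24) = 0xAABBCCDD.
 */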
2797
2798 int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
2799 {
2800 /* get pointers to arch-specific information */
2801 armv4_5_common_t *armv4_5 = target->arch_info;
2802 xscale_common_t *xscale = armv4_5->arch_info;
2803 int next_pc_ok = 0;
2804 u32 next_pc = 0x0;
2805 xscale_trace_data_t *trace_data = xscale->trace.data;
2806 int retval;
2807
2808 while (trace_data)
2809 {
2810 int i, chkpt;
2811 int rollover;
2812 int branch;
2813 int exception;
2814 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2815
2816 chkpt = 0;
2817 rollover = 0;
2818
2819 for (i = 0; i < trace_data->depth; i++)
2820 {
2821 next_pc_ok = 0;
2822 branch = 0;
2823 exception = 0;
2824
2825 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2826 continue;
2827
2828 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2829 {
2830 case 0: /* Exceptions */
2831 case 1:
2832 case 2:
2833 case 3:
2834 case 4:
2835 case 5:
2836 case 6:
2837 case 7:
2838 exception = (trace_data->entries[i].data & 0x70) >> 4;
2839 next_pc_ok = 1;
2840 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2841 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2842 break;
2843 case 8: /* Direct Branch */
2844 branch = 1;
2845 break;
2846 case 9: /* Indirect Branch */
2847 branch = 1;
2848 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2849 {
2850 next_pc_ok = 1;
2851 }
2852 break;
2853 case 13: /* Checkpointed Indirect Branch */
2854 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2855 {
2856 next_pc_ok = 1;
2857 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2858 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2859 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2860 }
2861 /* explicit fall-through */
2862 case 12: /* Checkpointed Direct Branch */
2863 branch = 1;
2864 if (chkpt == 0)
2865 {
2866 next_pc_ok = 1;
2867 next_pc = trace_data->chkpt0;
2868 chkpt++;
2869 }
2870 else if (chkpt == 1)
2871 {
2872 next_pc_ok = 1;
2873 next_pc = trace_data->chkpt1;
2874 chkpt++;
2875 }
2876 else
2877 {
2878 LOG_WARNING("more than two checkpointed branches encountered");
2879 }
2880 break;
2881 case 15: /* Roll-over */
2882 rollover++;
2883 continue;
2884 default: /* Reserved */
2885 command_print(cmd_ctx, "--- reserved trace message ---");
2886 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2887 return ERROR_OK;
2888 }
2889
2890 if (xscale->trace.pc_ok)
2891 {
2892 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2893 arm_instruction_t instruction;
2894
2895 if ((exception == 6) || (exception == 7))
2896 {
2897 /* IRQ or FIQ exception, no instruction executed */
2898 executed -= 1;
2899 }
2900
2901 while (executed-- >= 0)
2902 {
2903 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2904 {
2905 /* can't continue tracing with no image available */
2906 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2907 {
2908 return retval;
2909 }
2910 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2911 {
2912 /* TODO: handle incomplete images */
2913 }
2914 }
2915
2916 /* a precise abort on a load to the PC is included in the incremental
2917 * word count, other instructions causing data aborts are not included
2918 */
2919 if ((executed == 0) && (exception == 4)
2920 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2921 {
2922 if ((instruction.type == ARM_LDM)
2923 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2924 {
2925 executed--;
2926 }
2927 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2928 && (instruction.info.load_store.Rd != 15))
2929 {
2930 executed--;
2931 }
2932 }
2933
2934 /* only the last instruction executed
2935 * (the one that caused the control flow change)
2936 * could be a taken branch
2937 */
2938 if (((executed == -1) && (branch == 1)) &&
2939 (((instruction.type == ARM_B) ||
2940 (instruction.type == ARM_BL) ||
2941 (instruction.type == ARM_BLX)) &&
2942 (instruction.info.b_bl_bx_blx.target_address != -1)))
2943 {
2944 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2945 }
2946 else
2947 {
2948 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2949 }
2950 command_print(cmd_ctx, "%s", instruction.text);
2951 }
2952
2953 rollover = 0;
2954 }
2955
2956 if (next_pc_ok)
2957 {
2958 xscale->trace.current_pc = next_pc;
2959 xscale->trace.pc_ok = 1;
2960 }
2961 }
2962
2963 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2964 {
2965 arm_instruction_t instruction;
2966 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2967 {
2968 /* can't continue tracing with no image available */
2969 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2970 {
2971 return retval;
2972 }
2973 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2974 {
2975 /* TODO: handle incomplete images */
2976 }
2977 }
2978 command_print(cmd_ctx, "%s", instruction.text);
2979 }
2980
2981 trace_data = trace_data->next;
2982 }
2983
2984 return ERROR_OK;
2985 }
2986
2987 void xscale_build_reg_cache(target_t *target)
2988 {
2989 /* get pointers to arch-specific information */
2990 armv4_5_common_t *armv4_5 = target->arch_info;
2991 xscale_common_t *xscale = armv4_5->arch_info;
2992
2993 reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
2994 xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
2995 int i;
2996 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);
2997
2998 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2999 armv4_5->core_cache = (*cache_p);
3000
3001 /* register a register arch-type for XScale dbg registers only once */
3002 if (xscale_reg_arch_type == -1)
3003 xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
3004
3005 (*cache_p)->next = malloc(sizeof(reg_cache_t));
3006 cache_p = &(*cache_p)->next;
3007
3008 /* fill in values for the xscale reg cache */
3009 (*cache_p)->name = "XScale registers";
3010 (*cache_p)->next = NULL;
3011 (*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
3012 (*cache_p)->num_regs = num_regs;
3013
3014 for (i = 0; i < num_regs; i++)
3015 {
3016 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
3017 (*cache_p)->reg_list[i].value = calloc(4, 1);
3018 (*cache_p)->reg_list[i].dirty = 0;
3019 (*cache_p)->reg_list[i].valid = 0;
3020 (*cache_p)->reg_list[i].size = 32;
3021 (*cache_p)->reg_list[i].bitfield_desc = NULL;
3022 (*cache_p)->reg_list[i].num_bitfields = 0;
3023 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
3024 (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
3025 arch_info[i] = xscale_reg_arch_info[i];
3026 arch_info[i].target = target;
3027 }
3028
3029 xscale->reg_cache = (*cache_p);
3030 }
3031
3032 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target)
3033 {
3034 return ERROR_OK;
3035 }
3036
3037 int xscale_quit()
3038 {
3039
3040 return ERROR_OK;
3041 }
3042
3043 int xscale_init_arch_info(target_t *target, xscale_common_t *xscale, int chain_pos, char *variant)
3044 {
3045 armv4_5_common_t *armv4_5;
3046 u32 high_reset_branch, low_reset_branch;
3047 int i;
3048
3049 armv4_5 = &xscale->armv4_5_common;
3050
3051 /* store architecture specific data (none so far) */
3052 xscale->arch_info = NULL;
3053 xscale->common_magic = XSCALE_COMMON_MAGIC;
3054
3055 /* remember the variant (PXA25x, PXA27x, IXP42x, ...) */
3056 xscale->variant = strdup(variant);
3057
3058 /* prepare JTAG information for the new target */
3059 xscale->jtag_info.chain_pos = chain_pos;
3060
3061 xscale->jtag_info.dbgrx = 0x02;
3062 xscale->jtag_info.dbgtx = 0x10;
3063 xscale->jtag_info.dcsr = 0x09;
3064 xscale->jtag_info.ldic = 0x07;
3065
3066 if ((strcmp(xscale->variant, "pxa250") == 0) ||
3067 (strcmp(xscale->variant, "pxa255") == 0) ||
3068 (strcmp(xscale->variant, "pxa26x") == 0))
3069 {
3070 xscale->jtag_info.ir_length = 5;
3071 }
3072 else if ((strcmp(xscale->variant, "pxa27x") == 0) ||
3073 (strcmp(xscale->variant, "ixp42x") == 0) ||
3074 (strcmp(xscale->variant, "ixp45x") == 0) ||
3075 (strcmp(xscale->variant, "ixp46x") == 0))
3076 {
3077 xscale->jtag_info.ir_length = 7;
3078 }
3079
3080 /* the debug handler isn't installed (and thus not running) at this time */
3081 xscale->handler_installed = 0;
3082 xscale->handler_running = 0;
3083 xscale->handler_address = 0xfe000800;
3084
3085 /* clear the vectors we keep locally for reference */
3086 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3087 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3088
3089 /* no user-specified vectors have been configured yet */
3090 xscale->static_low_vectors_set = 0x0;
3091 xscale->static_high_vectors_set = 0x0;
3092
3093 /* calculate branches to debug handler */
3094 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3095 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
3096
3097 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3098 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3099
3100 for (i = 1; i <= 7; i++)
3101 {
3102 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3103 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3104 }
3105
3106 /* 64kB aligned region used for DCache cleaning */
3107 xscale->cache_clean_address = 0xfffe0000;
3108
3109 xscale->hold_rst = 0;
3110 xscale->external_debug_break = 0;
3111
3112 xscale->force_hw_bkpts = 1;
3113
3114 xscale->ibcr_available = 2;
3115 xscale->ibcr0_used = 0;
3116 xscale->ibcr1_used = 0;
3117
3118 xscale->dbr_available = 2;
3119 xscale->dbr0_used = 0;
3120 xscale->dbr1_used = 0;
3121
3122 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3123 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3124
3125 xscale->vector_catch = 0x1;
3126
3127 xscale->trace.capture_status = TRACE_IDLE;
3128 xscale->trace.data = NULL;
3129 xscale->trace.image = NULL;
3130 xscale->trace.buffer_enabled = 0;
3131 xscale->trace.buffer_fill = 0;
3132
3133 /* prepare ARMv4/5 specific information */
3134 armv4_5->arch_info = xscale;
3135 armv4_5->read_core_reg = xscale_read_core_reg;
3136 armv4_5->write_core_reg = xscale_write_core_reg;
3137 armv4_5->full_context = xscale_full_context;
3138
3139 armv4_5_init_arch_info(target, armv4_5);
3140
3141 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3142 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3143 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3144 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3145 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3146 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3147 xscale->armv4_5_mmu.has_tiny_pages = 1;
3148 xscale->armv4_5_mmu.mmu_enabled = 0;
3149
3150 xscale->fast_memory_access = 0;
3151
3152 return ERROR_OK;
3153 }
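
/* Illustrative sketch only (hypothetical helper, kept inside #if 0): the reset
 * vector branch words computed above follow the ARM B-encoding rule,
 * offset = (destination - vector_address - 8) >> 2, truncated to 24 bits.
 */
#if 0
static u32 example_reset_branch(u32 handler_address, u32 vector_base)
{
	u32 offset = (handler_address + 0x20 - vector_base - 0x8) >> 2;
	return ARMV4_5_B(offset & 0xffffff, 0);
}
#endif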
3154
3155 /* target xscale <endianness> <startup_mode> <chain_pos> <variant> */
3156 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target)
3157 {
3158 int chain_pos;
3159 char *variant = NULL;
3160 xscale_common_t *xscale = malloc(sizeof(xscale_common_t));
3161 memset(xscale, 0, sizeof(*xscale));
3162
3163 if (argc < 5)
3164 {
3165 LOG_ERROR("'target xscale' requires four arguments: <endianess> <startup_mode> <chain_pos> <variant>");
3166 return ERROR_OK;
3167 }
3168
3169 chain_pos = strtoul(args[3], NULL, 0);
3170
3171 variant = args[4];
3172
3173 xscale_init_arch_info(target, xscale, chain_pos, variant);
3174 xscale_build_reg_cache(target);
3175
3176 return ERROR_OK;
3177 }
3178
3179 int xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3180 {
3181 target_t *target = NULL;
3182 armv4_5_common_t *armv4_5;
3183 xscale_common_t *xscale;
3184
3185 u32 handler_address;
3186
3187 if (argc < 2)
3188 {
3189 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3190 return ERROR_OK;
3191 }
3192
3193 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3194 {
3195 LOG_ERROR("no target '%s' configured", args[0]);
3196 return ERROR_OK;
3197 }
3198
3199 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3200 {
3201 return ERROR_OK;
3202 }
3203
3204 handler_address = strtoul(args[1], NULL, 0);
3205
3206 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3207 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3208 {
3209 xscale->handler_address = handler_address;
3210 }
3211 else
3212 {
3213 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3214 }
3215
3216 return ERROR_OK;
3217 }
3218
3219 int xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3220 {
3221 target_t *target = NULL;
3222 armv4_5_common_t *armv4_5;
3223 xscale_common_t *xscale;
3224
3225 u32 cache_clean_address;
3226
3227 if (argc < 2)
3228 {
3229 LOG_ERROR("'xscale cache_clean_address <target#> <address>' command takes two required operands");
3230 return ERROR_OK;
3231 }
3232
3233 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3234 {
3235 LOG_ERROR("no target '%s' configured", args[0]);
3236 return ERROR_OK;
3237 }
3238
3239 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3240 {
3241 return ERROR_OK;
3242 }
3243
3244 cache_clean_address = strtoul(args[1], NULL, 0);
3245
3246 if (cache_clean_address & 0xffff)
3247 {
3248 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3249 }
3250 else
3251 {
3252 xscale->cache_clean_address = cache_clean_address;
3253 }
3254
3255 return ERROR_OK;
3256 }
3257
3258 int xscale_handle_cache_info_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3259 {
3260 target_t *target = get_current_target(cmd_ctx);
3261 armv4_5_common_t *armv4_5;
3262 xscale_common_t *xscale;
3263
3264 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3265 {
3266 return ERROR_OK;
3267 }
3268
3269 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
3270 }
3271
3272 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical)
3273 {
3274 armv4_5_common_t *armv4_5;
3275 xscale_common_t *xscale;
3276 int retval;
3277 int type;
3278 u32 cb;
3279 int domain;
3280 u32 ap;
3281
3282
3283 if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)
3284 {
3285 return retval;
3286 }
3287 u32 ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3288 if (type == -1)
3289 {
3290 return ret;
3291 }
3292 *physical = ret;
3293 return ERROR_OK;
3294 }
3295
3296 static int xscale_mmu(struct target_s *target, int *enabled)
3297 {
3298 armv4_5_common_t *armv4_5 = target->arch_info;
3299 xscale_common_t *xscale = armv4_5->arch_info;
3300
3301 if (target->state != TARGET_HALTED)
3302 {
3303 LOG_ERROR("Target not halted");
3304 return ERROR_TARGET_INVALID;
3305 }
3306 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3307 return ERROR_OK;
3308 }
3309
3310
3311 int xscale_handle_mmu_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3312 {
3313 target_t *target = get_current_target(cmd_ctx);
3314 armv4_5_common_t *armv4_5;
3315 xscale_common_t *xscale;
3316
3317 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3318 {
3319 return ERROR_OK;
3320 }
3321
3322 if (target->state != TARGET_HALTED)
3323 {
3324 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3325 return ERROR_OK;
3326 }
3327
3328 if (argc >= 1)
3329 {
3330 if (strcmp("enable", args[0]) == 0)
3331 {
3332 xscale_enable_mmu_caches(target, 1, 0, 0);
3333 xscale->armv4_5_mmu.mmu_enabled = 1;
3334 }
3335 else if (strcmp("disable", args[0]) == 0)
3336 {
3337 xscale_disable_mmu_caches(target, 1, 0, 0);
3338 xscale->armv4_5_mmu.mmu_enabled = 0;
3339 }
3340 }
3341
3342 command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3343
3344 return ERROR_OK;
3345 }
3346
3347 int xscale_handle_idcache_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3348 {
3349 target_t *target = get_current_target(cmd_ctx);
3350 armv4_5_common_t *armv4_5;
3351 xscale_common_t *xscale;
3352 int icache = 0, dcache = 0;
3353
3354 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3355 {
3356 return ERROR_OK;
3357 }
3358
3359 if (target->state != TARGET_HALTED)
3360 {
3361 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3362 return ERROR_OK;
3363 }
3364
3365 if (strcmp(cmd, "icache") == 0)
3366 icache = 1;
3367 else if (strcmp(cmd, "dcache") == 0)
3368 dcache = 1;
3369
3370 if (argc >= 1)
3371 {
3372 if (strcmp("enable", args[0]) == 0)
3373 {
3374 xscale_enable_mmu_caches(target, 0, dcache, icache);
3375
3376 if (icache)
3377 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
3378 else if (dcache)
3379 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
3380 }
3381 else if (strcmp("disable", args[0]) == 0)
3382 {
3383 xscale_disable_mmu_caches(target, 0, dcache, icache);
3384
3385 if (icache)
3386 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
3387 else if (dcache)
3388 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
3389 }
3390 }
3391
3392 if (icache)
3393 command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
3394
3395 if (dcache)
3396 command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
3397
3398 return ERROR_OK;
3399 }
3400
3401 int xscale_handle_vector_catch_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3402 {
3403 target_t *target = get_current_target(cmd_ctx);
3404 armv4_5_common_t *armv4_5;
3405 xscale_common_t *xscale;
3406
3407 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3408 {
3409 return ERROR_OK;
3410 }
3411
3412 if (argc < 1)
3413 {
3414 command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
3415 }
3416 else
3417 {
3418 xscale->vector_catch = strtoul(args[0], NULL, 0);
3419 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3420 xscale_write_dcsr(target, -1, -1);
3421 }
3422
3423 command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3424
3425 return ERROR_OK;
3426 }
3427
3428 int xscale_handle_force_hw_bkpts_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3429 {
3430 target_t *target = get_current_target(cmd_ctx);
3431 armv4_5_common_t *armv4_5;
3432 xscale_common_t *xscale;
3433
3434 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3435 {
3436 return ERROR_OK;
3437 }
3438
3439 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3440 {
3441 xscale->force_hw_bkpts = 1;
3442 }
3443 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3444 {
3445 xscale->force_hw_bkpts = 0;
3446 }
3447 else
3448 {
3449 command_print(cmd_ctx, "usage: xscale force_hw_bkpts <enable|disable>");
3450 }
3451
3452 command_print(cmd_ctx, "force hardware breakpoints %s", (xscale->force_hw_bkpts) ? "enabled" : "disabled");
3453
3454 return ERROR_OK;
3455 }
3456
3457 int xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3458 {
3459 target_t *target = get_current_target(cmd_ctx);
3460 armv4_5_common_t *armv4_5;
3461 xscale_common_t *xscale;
3462 u32 dcsr_value;
3463
3464 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3465 {
3466 return ERROR_OK;
3467 }
3468
3469 if (target->state != TARGET_HALTED)
3470 {
3471 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3472 return ERROR_OK;
3473 }
3474
3475 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3476 {
3477 xscale_trace_data_t *td, *next_td;
3478 xscale->trace.buffer_enabled = 1;
3479
3480 /* free old trace data */
3481 td = xscale->trace.data;
3482 while (td)
3483 {
3484 next_td = td->next;
3485
3486 if (td->entries)
3487 free(td->entries);
3488 free(td);
3489 td = next_td;
3490 }
3491 xscale->trace.data = NULL;
3492 }
3493 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3494 {
3495 xscale->trace.buffer_enabled = 0;
3496 }
3497
3498 if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
3499 {
3500 if (argc >= 3)
3501 xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);
3502 else
3503 xscale->trace.buffer_fill = 1;
3504 }
3505 else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
3506 {
3507 xscale->trace.buffer_fill = -1;
3508 }
3509
3510 if (xscale->trace.buffer_enabled)
3511 {
3512 /* if we enable the trace buffer in fill-once
3513 * mode we know the address of the first instruction */
3514 xscale->trace.pc_ok = 1;
3515 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3516 }
3517 else
3518 {
3519 /* otherwise the address is unknown, and we have no known good PC */
3520 xscale->trace.pc_ok = 0;
3521 }
3522
3523 command_print(cmd_ctx, "trace buffer %s (%s)",
3524 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3525 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3526
3527 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3528 if (xscale->trace.buffer_fill >= 0)
3529 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3530 else
3531 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3532
3533 return ERROR_OK;
3534 }
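
/* Example invocations (for reference only), matching the syntax parsed above:
 *
 *   xscale trace_buffer enable fill 1
 *   xscale trace_buffer enable wrap
 *   xscale trace_buffer disable
 */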
3535
3536 int xscale_handle_trace_image_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3537 {
3538 target_t *target;
3539 armv4_5_common_t *armv4_5;
3540 xscale_common_t *xscale;
3541
3542 if (argc < 1)
3543 {
3544 command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
3545 return ERROR_OK;
3546 }
3547
3548 target = get_current_target(cmd_ctx);
3549
3550 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3551 {
3552 return ERROR_OK;
3553 }
3554
3555 if (xscale->trace.image)
3556 {
3557 image_close(xscale->trace.image);
3558 free(xscale->trace.image);
3559 command_print(cmd_ctx, "previously loaded image found and closed");
3560 }
3561
3562 xscale->trace.image = malloc(sizeof(image_t));
3563 xscale->trace.image->base_address_set = 0;
3564 xscale->trace.image->start_address_set = 0;
3565
3566 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3567 if (argc >= 2)
3568 {
3569 xscale->trace.image->base_address_set = 1;
3570 xscale->trace.image->base_address = strtoul(args[1], NULL, 0);
3571 }
3572 else
3573 {
3574 xscale->trace.image->base_address_set = 0;
3575 }
3576
3577 if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
3578 {
3579 free(xscale->trace.image);
3580 xscale->trace.image = NULL;
3581 return ERROR_OK;
3582 }
3583
3584 return ERROR_OK;
3585 }
3586
3587 int xscale_handle_dump_trace_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3588 {
3589 target_t *target = get_current_target(cmd_ctx);
3590 armv4_5_common_t *armv4_5;
3591 xscale_common_t *xscale;
3592 xscale_trace_data_t *trace_data;
3593 fileio_t file;
3594
3595 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3596 {
3597 return ERROR_OK;
3598 }
3599
3600 if (target->state != TARGET_HALTED)
3601 {
3602 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3603 return ERROR_OK;
3604 }
3605
3606 if (argc < 1)
3607 {
3608 command_print(cmd_ctx, "usage: xscale dump_trace <file>");
3609 return ERROR_OK;
3610 }
3611
3612 trace_data = xscale->trace.data;
3613
3614 if (!trace_data)
3615 {
3616 command_print(cmd_ctx, "no trace data collected");
3617 return ERROR_OK;
3618 }
3619
3620 if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3621 {
3622 return ERROR_OK;
3623 }
3624
3625 while (trace_data)
3626 {
3627 int i;
3628
3629 fileio_write_u32(&file, trace_data->chkpt0);
3630 fileio_write_u32(&file, trace_data->chkpt1);
3631 fileio_write_u32(&file, trace_data->last_instruction);
3632 fileio_write_u32(&file, trace_data->depth);
3633
3634 for (i = 0; i < trace_data->depth; i++)
3635 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3636
3637 trace_data = trace_data->next;
3638 }
3639
3640 fileio_close(&file);
3641
3642 return ERROR_OK;
3643 }
3644
3645 int xscale_handle_analyze_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3646 {
3647 target_t *target = get_current_target(cmd_ctx);
3648 armv4_5_common_t *armv4_5;
3649 xscale_common_t *xscale;
3650
3651 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3652 {
3653 return ERROR_OK;
3654 }
3655
3656 xscale_analyze_trace(target, cmd_ctx);
3657
3658 return ERROR_OK;
3659 }
3660
3661 int xscale_handle_cp15(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3662 {
3663 target_t *target = get_current_target(cmd_ctx);
3664 armv4_5_common_t *armv4_5;
3665 xscale_common_t *xscale;
3666
3667 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3668 {
3669 return ERROR_OK;
3670 }
3671
3672 if (target->state != TARGET_HALTED)
3673 {
3674 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3675 return ERROR_OK;
3676 }
3677 u32 reg_no = 0;
3678 reg_t *reg = NULL;
3679 if(argc > 0)
3680 {
3681 reg_no = strtoul(args[0], NULL, 0);
3682 /*translate from xscale cp15 register no to openocd register*/
3683 switch(reg_no)
3684 {
3685 case 0:
3686 reg_no = XSCALE_MAINID;
3687 break;
3688 case 1:
3689 reg_no = XSCALE_CTRL;
3690 break;
3691 case 2:
3692 reg_no = XSCALE_TTB;
3693 break;
3694 case 3:
3695 reg_no = XSCALE_DAC;
3696 break;
3697 case 5:
3698 reg_no = XSCALE_FSR;
3699 break;
3700 case 6:
3701 reg_no = XSCALE_FAR;
3702 break;
3703 case 13:
3704 reg_no = XSCALE_PID;
3705 break;
3706 case 15:
3707 reg_no = XSCALE_CPACCESS;
3708 break;
3709 default:
3710 command_print(cmd_ctx, "invalid register number");
3711 return ERROR_INVALID_ARGUMENTS;
3712 }
3713 reg = &xscale->reg_cache->reg_list[reg_no];
3714
3715 }
3716 if(argc == 1)
3717 {
3718 u32 value;
3719
3720 /* read cp15 control register */
3721 xscale_get_reg(reg);
3722 value = buf_get_u32(reg->value, 0, 32);
3723 command_print(cmd_ctx, "%s (/%i): 0x%x", reg->name, reg->size, value);
3724 }
3725 else if(argc == 2)
3726 {
3727
3728 u32 value = strtoul(args[1], NULL, 0);
3729
3730 /* send CP write request (command 0x41) */
3731 xscale_send_u32(target, 0x41);
3732
3733 /* send CP register number */
3734 xscale_send_u32(target, reg_no);
3735
3736 /* send CP register value */
3737 xscale_send_u32(target, value);
3738
3739 /* execute cpwait to ensure outstanding operations complete */
3740 xscale_send_u32(target, 0x53);
3741 }
3742 else
3743 {
3744 command_print(cmd_ctx, "usage: cp15 [register]<, [value]>");
3745 }
3746
3747 return ERROR_OK;
3748 }
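
/* Example invocations (for reference only, values are arbitrary), using the
 * cp15 register numbers translated in the switch above:
 *
 *   xscale cp15 1               (read the cp15 control register)
 *   xscale cp15 3 0xffffffff    (write the domain access control register)
 */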
3749
3750 int handle_xscale_fast_memory_access_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3751 {
3752 target_t *target = get_current_target(cmd_ctx);
3753 armv4_5_common_t *armv4_5;
3754 xscale_common_t *xscale;
3755
3756 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3757 {
3758 return ERROR_OK;
3759 }
3760
3761 if (argc == 1)
3762 {
3763 if (strcmp("enable", args[0]) == 0)
3764 {
3765 xscale->fast_memory_access = 1;
3766 }
3767 else if (strcmp("disable", args[0]) == 0)
3768 {
3769 xscale->fast_memory_access = 0;
3770 }
3771 else
3772 {
3773 return ERROR_COMMAND_SYNTAX_ERROR;
3774 }
3775 } else if (argc!=0)
3776 {
3777 return ERROR_COMMAND_SYNTAX_ERROR;
3778 }
3779
3780 command_print(cmd_ctx, "fast memory access is %s", (xscale->fast_memory_access) ? "enabled" : "disabled");
3781
3782 return ERROR_OK;
3783 }
3784
3785 int xscale_register_commands(struct command_context_s *cmd_ctx)
3786 {
3787 command_t *xscale_cmd;
3788
3789 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3790
3791 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
3792 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3793
3794 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3795 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3796 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3797 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3798
3799 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_idcache_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
3800
3801 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable|disable> ['fill' [n]|'wrap']");
3802
3803 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3804 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3805 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3806 COMMAND_EXEC, "load image from <file> [base address]");
3807
3808 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3809 register_command(cmd_ctx, xscale_cmd, "fast_memory_access", handle_xscale_fast_memory_access_command,
3810 COMMAND_ANY, "use fast memory accesses instead of slower but potentially unsafe slow accesses <enable|disable>");
3811
3812
3813 armv4_5_register_commands(cmd_ctx);
3814
3815 return ERROR_OK;
3816 }
