jtag: retire jtag_get/set_end_state()
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
41
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
74
75
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
78 *
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
83 */
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
86
/* Names of the XScale-specific registers exposed through the register
 * cache.  Order is significant: it must stay parallel with the
 * XSCALE_* index constants (used elsewhere as reg_list[] indices) and
 * with xscale_reg_arch_info[] below.
 */
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID", /* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0", /* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX", /* 20 */
	"XSCALE_TXRXCTRL",
};
112
/* Per-register access descriptors, parallel to xscale_reg_list[].
 * The first member appears to be the debug-handler register number; the
 * entries marked -1 are reachable only through dedicated JTAG data
 * chains, not through the debug handler (see the trailing comments).
 * NOTE(review): second member (NULL here) is filled in elsewhere —
 * presumably a back-pointer set up when the cache is built; confirm
 * against xscale_build_reg_cache().
 */
static const struct xscale_reg xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
	{-1, NULL}, /* TX accessed via JTAG */
	{-1, NULL}, /* RX accessed via JTAG */
	{-1, NULL}, /* TXRXCTRL implicit access via JTAG */
};
138
139 /* convenience wrapper to access XScale specific registers */
140 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
141 {
142 uint8_t buf[4];
143
144 buf_set_u32(buf, 0, 32, value);
145
146 return xscale_set_reg(reg, buf);
147 }
148
149 static const char xscale_not[] = "target is not an XScale";
150
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
153 {
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
157 }
158 return ERROR_OK;
159 }
160
/* Queue an IR scan that selects @a new_instr on @a tap, ending in
 * @a end_state.  The scan is skipped entirely when the TAP's current
 * instruction already matches, so on the wire this may be a no-op
 * (callers that depend on reaching end_state must force it themselves;
 * see the jtag_add_runtest() call in xscale_receive()).
 *
 * The scan is only queued here; the caller runs jtag_execute_queue().
 * NOTE(review): 'scratch' goes out of scope before the queue executes —
 * this assumes jtag_add_ir_scan() copies out_value into the command
 * queue; confirm against the JTAG queue implementation.
 *
 * Returns ERROR_FAIL for a NULL tap, ERROR_OK otherwise.
 */
static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
{
	if (tap == NULL)
		return ERROR_FAIL;

	if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
	{
		struct scan_field field;
		uint8_t scratch[4];

		memset(&field, 0, sizeof field);
		field.num_bits = tap->ir_length;
		field.out_value = scratch;
		buf_set_u32(field.out_value, 0, field.num_bits, new_instr);

		jtag_add_ir_scan(tap, &field, end_state);
	}

	return ERROR_OK;
}
181
/* Read the Debug Control/Status Register through the SELDCSR JTAG data
 * chain and store the captured value into the cached XSCALE_DCSR entry.
 *
 * The DR scan format is: 3 control bits (bit1 = hold_rst, bit2 =
 * external_debug_break, driven from the xscale state), 32 DCSR bits,
 * and 1 trailing bit.  Two scans are performed: the first captures the
 * DCSR, the second writes the just-read value back (the chain is
 * read-modify-write in nature).
 *
 * Returns ERROR_OK or the jtag_execute_queue() error.
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant,
			TAP_DRPAUSE);

	/* drive the current hold-reset / external-break requests out */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* capture-value checks are evaluated when the queue runs */
	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
243
244
245 static void xscale_getbuf(jtag_callback_data_t arg)
246 {
247 uint8_t *in = (uint8_t *)arg;
248 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
249 }
250
251 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
252 {
253 if (num_words == 0)
254 return ERROR_INVALID_ARGUMENTS;
255
256 struct xscale_common *xscale = target_to_xscale(target);
257 int retval = ERROR_OK;
258 tap_state_t path[3];
259 struct scan_field fields[3];
260 uint8_t *field0 = malloc(num_words * 1);
261 uint8_t field0_check_value = 0x2;
262 uint8_t field0_check_mask = 0x6;
263 uint32_t *field1 = malloc(num_words * 4);
264 uint8_t field2_check_value = 0x0;
265 uint8_t field2_check_mask = 0x1;
266 int words_done = 0;
267 int words_scheduled = 0;
268 int i;
269
270 path[0] = TAP_DRSELECT;
271 path[1] = TAP_DRCAPTURE;
272 path[2] = TAP_DRSHIFT;
273
274 memset(&fields, 0, sizeof fields);
275
276 fields[0].num_bits = 3;
277 fields[0].check_value = &field0_check_value;
278 fields[0].check_mask = &field0_check_mask;
279
280 fields[1].num_bits = 32;
281
282 fields[2].num_bits = 1;
283 fields[2].check_value = &field2_check_value;
284 fields[2].check_mask = &field2_check_mask;
285
286 xscale_jtag_set_instr(target->tap,
287 XSCALE_DBGTX << xscale->xscale_variant,
288 TAP_IDLE);
289 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
290
291 /* repeat until all words have been collected */
292 int attempts = 0;
293 while (words_done < num_words)
294 {
295 /* schedule reads */
296 words_scheduled = 0;
297 for (i = words_done; i < num_words; i++)
298 {
299 fields[0].in_value = &field0[i];
300
301 jtag_add_pathmove(3, path);
302
303 fields[1].in_value = (uint8_t *)(field1 + i);
304
305 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
306
307 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
308
309 words_scheduled++;
310 }
311
312 if ((retval = jtag_execute_queue()) != ERROR_OK)
313 {
314 LOG_ERROR("JTAG error while receiving data from debug handler");
315 break;
316 }
317
318 /* examine results */
319 for (i = words_done; i < num_words; i++)
320 {
321 if (!(field0[0] & 1))
322 {
323 /* move backwards if necessary */
324 int j;
325 for (j = i; j < num_words - 1; j++)
326 {
327 field0[j] = field0[j + 1];
328 field1[j] = field1[j + 1];
329 }
330 words_scheduled--;
331 }
332 }
333 if (words_scheduled == 0)
334 {
335 if (attempts++==1000)
336 {
337 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
338 retval = ERROR_TARGET_TIMEOUT;
339 break;
340 }
341 }
342
343 words_done += words_scheduled;
344 }
345
346 for (i = 0; i < num_words; i++)
347 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
348
349 free(field1);
350
351 return retval;
352 }
353
/* Poll the debug handler's TX register (DBGTX chain) for up to one
 * second.  When data is available its 32-bit payload is captured into
 * the cached XSCALE_TX register.
 *
 * @param consume when non-zero, clear the TX_READY flag by taking the
 *	Capture-DR -> Shift-DR path; when zero, peek without consuming by
 *	detouring through Exit1-DR/Pause-DR/Exit2-DR.
 *
 * Returns ERROR_OK when valid data was read,
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when TX held no data, or
 * ERROR_TARGET_TIMEOUT on JTAG error / 1 second timeout.
 */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGTX << xscale->xscale_variant,
			TAP_IDLE);

	/* consuming path: straight into Shift-DR */
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	/* non-consuming path: detour through Pause-DR before shifting */
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	/* 3 status bits; bit 0 is TX valid */
	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* only keep polling when consuming and no data arrived yet;
		 * a non-consuming peek returns immediately either way */
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
	done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
448
/* Push the cached XSCALE_RX register value to the debug handler via the
 * DBGRX chain.  Polls (for up to one second) until the handler has
 * consumed any previous word (RX ready bit low), then performs a final
 * scan with the rx_valid bit set to hand the new word over.
 *
 * Returns ERROR_OK, a JTAG error, or ERROR_TARGET_TIMEOUT.
 */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant,
			TAP_IDLE);

	memset(&fields, 0, sizeof fields);

	/* 3 status bits; bit 0 of the captured value is "RX still full" */
	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	/* trailing rx_valid bit; kept 0 while polling */
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
	done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
530
531 /* send count elements of size byte to the debug handler */
532 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
533 {
534 struct xscale_common *xscale = target_to_xscale(target);
535 uint32_t t[3];
536 int bits[3];
537 int retval;
538 int done_count = 0;
539
540 xscale_jtag_set_instr(target->tap,
541 XSCALE_DBGRX << xscale->xscale_variant,
542 TAP_IDLE);
543
544 bits[0]=3;
545 t[0]=0;
546 bits[1]=32;
547 t[2]=1;
548 bits[2]=1;
549 int endianness = target->endianness;
550 while (done_count++ < count)
551 {
552 switch (size)
553 {
554 case 4:
555 if (endianness == TARGET_LITTLE_ENDIAN)
556 {
557 t[1]=le_to_h_u32(buffer);
558 } else
559 {
560 t[1]=be_to_h_u32(buffer);
561 }
562 break;
563 case 2:
564 if (endianness == TARGET_LITTLE_ENDIAN)
565 {
566 t[1]=le_to_h_u16(buffer);
567 } else
568 {
569 t[1]=be_to_h_u16(buffer);
570 }
571 break;
572 case 1:
573 t[1]=buffer[0];
574 break;
575 default:
576 LOG_ERROR("BUG: size neither 4, 2 nor 1");
577 return ERROR_INVALID_ARGUMENTS;
578 }
579 jtag_add_dr_out(target->tap,
580 3,
581 bits,
582 t,
583 TAP_IDLE);
584 buffer += size;
585 }
586
587 if ((retval = jtag_execute_queue()) != ERROR_OK)
588 {
589 LOG_ERROR("JTAG error while sending data to debug handler");
590 return retval;
591 }
592
593 return ERROR_OK;
594 }
595
596 static int xscale_send_u32(struct target *target, uint32_t value)
597 {
598 struct xscale_common *xscale = target_to_xscale(target);
599
600 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
601 return xscale_write_rx(target);
602 }
603
/* Write the cached DCSR value to the target through the SELDCSR chain.
 *
 * @param hold_rst new hold-reset request, or -1 to keep the current one
 * @param ext_dbg_brk new external-debug-break request, or -1 to keep
 *
 * The requests are latched into the xscale state and driven out in the
 * 3-bit control field alongside the 32 DCSR bits.  On success the
 * cached DCSR is marked clean/valid.  Returns ERROR_OK or a JTAG error.
 */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant,
			TAP_IDLE);

	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
660
/* Parity of a 32-bit word: 0 when the number of set bits is even,
 * 1 when it is odd.  Implemented as a full log2 XOR fold. */
static unsigned int parity (unsigned int v)
{
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v ^= v >> 2;
	v ^= v >> 1;
	return v & 1;
}
672
/* Load one 8-word cache line into the mini-ICache at virtual address
 * @a va via the LDIC JTAG function.
 *
 * Protocol: a 6-bit command (b000011 = "load Mini ICache line") plus the
 * 27-bit line address (va >> 5), followed by eight 33-bit packets, each
 * a 32-bit instruction word with its parity bit.
 *
 * Returns the jtag_execute_queue() result.
 */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	xscale_jtag_set_instr(target->tap,
			XSCALE_LDIC << xscale->xscale_variant,
			TAP_IDLE);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* read the packed word back via memcpy (avoids aliasing issues)
		 * and compute its parity bit */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	}

	return jtag_execute_queue();
}
727
728 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
729 {
730 struct xscale_common *xscale = target_to_xscale(target);
731 uint8_t packet[4];
732 uint8_t cmd;
733 struct scan_field fields[2];
734
735 xscale_jtag_set_instr(target->tap,
736 XSCALE_LDIC << xscale->xscale_variant,
737 TAP_IDLE);
738
739 /* CMD for invalidate IC line b000, bits [6:4] b000 */
740 buf_set_u32(&cmd, 0, 6, 0x0);
741
742 /* virtual address of desired cache line */
743 buf_set_u32(packet, 0, 27, va >> 5);
744
745 memset(&fields, 0, sizeof fields);
746
747 fields[0].num_bits = 6;
748 fields[0].out_value = &cmd;
749
750 fields[1].num_bits = 27;
751 fields[1].out_value = packet;
752
753 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
754
755 return ERROR_OK;
756 }
757
758 static int xscale_update_vectors(struct target *target)
759 {
760 struct xscale_common *xscale = target_to_xscale(target);
761 int i;
762 int retval;
763
764 uint32_t low_reset_branch, high_reset_branch;
765
766 for (i = 1; i < 8; i++)
767 {
768 /* if there's a static vector specified for this exception, override */
769 if (xscale->static_high_vectors_set & (1 << i))
770 {
771 xscale->high_vectors[i] = xscale->static_high_vectors[i];
772 }
773 else
774 {
775 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
776 if (retval == ERROR_TARGET_TIMEOUT)
777 return retval;
778 if (retval != ERROR_OK)
779 {
780 /* Some of these reads will fail as part of normal execution */
781 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
782 }
783 }
784 }
785
786 for (i = 1; i < 8; i++)
787 {
788 if (xscale->static_low_vectors_set & (1 << i))
789 {
790 xscale->low_vectors[i] = xscale->static_low_vectors[i];
791 }
792 else
793 {
794 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
795 if (retval == ERROR_TARGET_TIMEOUT)
796 return retval;
797 if (retval != ERROR_OK)
798 {
799 /* Some of these reads will fail as part of normal execution */
800 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
801 }
802 }
803 }
804
805 /* calculate branches to debug handler */
806 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
807 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
808
809 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
810 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
811
812 /* invalidate and load exception vectors in mini i-cache */
813 xscale_invalidate_ic_line(target, 0x0);
814 xscale_invalidate_ic_line(target, 0xffff0000);
815
816 xscale_load_ic(target, 0x0, xscale->low_vectors);
817 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
818
819 return ERROR_OK;
820 }
821
822 static int xscale_arch_state(struct target *target)
823 {
824 struct xscale_common *xscale = target_to_xscale(target);
825 struct arm *armv4_5 = &xscale->armv4_5_common;
826
827 static const char *state[] =
828 {
829 "disabled", "enabled"
830 };
831
832 static const char *arch_dbg_reason[] =
833 {
834 "", "\n(processor reset)", "\n(trace buffer full)"
835 };
836
837 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
838 {
839 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
840 return ERROR_INVALID_ARGUMENTS;
841 }
842
843 arm_arch_state(target);
844 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
845 state[xscale->armv4_5_mmu.mmu_enabled],
846 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
847 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
848 arch_dbg_reason[xscale->arch_debug_reason]);
849
850 return ERROR_OK;
851 }
852
/* Standard target poll hook.  Detects debug-state entry by probing the
 * TX register without consuming it: data present means the core stopped
 * and the debug handler is talking to us.  On entry, fetches the full
 * register snapshot and fires the halted/debug-halted event callbacks
 * (unless the debug entry itself resumed the target again).
 */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
	{
		enum target_state previous_state = target->state;
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
		{

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		}
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		{
			/* a real error (not just "no data yet") */
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be performed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
892
/* Handle entry into debug state.
 *
 * Sequence (order-critical, matches what the debug handler sends):
 *   1. re-read DCSR (also clears any pending external debug break),
 *   2. receive r0, pc, r1-r7 and cpsr (10 words),
 *   3. receive the banked r8-r14 (+spsr for privileged modes),
 *   4. decode the DCSR "method of entry" into a debug reason and apply
 *      the PC fixup,
 *   5. refresh cache-type / MMU / cache-enable state,
 *   6. if tracing, collect trace data and possibly auto-resume.
 *
 * Returns ERROR_OK, a communication error, or ERROR_TARGET_FAILURE when
 * the received cpsr is garbage.
 */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t pc;
	uint32_t buffer[10];
	int i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->pc->value, 0, 32, buffer[1]);
	armv4_5->pc->dirty = 1;
	armv4_5->pc->valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(armv4_5, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* an invalid mode in cpsr means the word stream is out of sync */
	if (!is_arm_mode(armv4_5->core_mode))
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
			arm_mode_name(armv4_5->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (armv4_5->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
		armv4_5->spsr->dirty = false;
		armv4_5->spsr->valid = true;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++)
	{
		struct reg *r = arm_reg_current(armv4_5, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* examine debug reason: MOE is DCSR bits [4:2] */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	/* every MOE value reports PC+4 relative to the stopped instruction,
	 * hence the uniform "pc -= 4" fixup below */
	switch (moe)
	{
		case 0x0: /* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1: /* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2: /* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3: /* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4: /* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5: /* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6: /* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7: /* Reserved (may flag Hot-Debug support) */
		default:
			/* NOTE(review): terminating the whole process here is harsh;
			 * candidate for returning an error instead */
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(armv4_5->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.buffer_enabled)
	{
		xscale_read_trace(target);
		xscale->trace.buffer_fill--;

		/* resume if we're still collecting trace data */
		if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
			&& (xscale->trace.buffer_fill > 0))
		{
			xscale_resume(target, 1, 0x0, 1, 0);
		}
		else
		{
			xscale->trace.buffer_enabled = 0;
		}
	}

	return ERROR_OK;
}
1062
1063 static int xscale_halt(struct target *target)
1064 {
1065 struct xscale_common *xscale = target_to_xscale(target);
1066
1067 LOG_DEBUG("target->state: %s",
1068 target_state_name(target));
1069
1070 if (target->state == TARGET_HALTED)
1071 {
1072 LOG_DEBUG("target was already halted");
1073 return ERROR_OK;
1074 }
1075 else if (target->state == TARGET_UNKNOWN)
1076 {
1077 /* this must not happen for a xscale target */
1078 LOG_ERROR("target was in unknown state when halt was requested");
1079 return ERROR_TARGET_INVALID;
1080 }
1081 else if (target->state == TARGET_RESET)
1082 {
1083 LOG_DEBUG("target->state == TARGET_RESET");
1084 }
1085 else
1086 {
1087 /* assert external dbg break */
1088 xscale->external_debug_break = 1;
1089 xscale_read_dcsr(target);
1090
1091 target->debug_reason = DBG_REASON_DBGRQ;
1092 }
1093
1094 return ERROR_OK;
1095 }
1096
1097 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1098 {
1099 struct xscale_common *xscale = target_to_xscale(target);
1100 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1101 int retval;
1102
1103 if (xscale->ibcr0_used)
1104 {
1105 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1106
1107 if (ibcr0_bp)
1108 {
1109 xscale_unset_breakpoint(target, ibcr0_bp);
1110 }
1111 else
1112 {
1113 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1114 exit(-1);
1115 }
1116 }
1117
1118 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1119 return retval;
1120
1121 return ERROR_OK;
1122 }
1123
1124 static int xscale_disable_single_step(struct target *target)
1125 {
1126 struct xscale_common *xscale = target_to_xscale(target);
1127 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1128 int retval;
1129
1130 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1131 return retval;
1132
1133 return ERROR_OK;
1134 }
1135
1136 static void xscale_enable_watchpoints(struct target *target)
1137 {
1138 struct watchpoint *watchpoint = target->watchpoints;
1139
1140 while (watchpoint)
1141 {
1142 if (watchpoint->set == 0)
1143 xscale_set_watchpoint(target, watchpoint);
1144 watchpoint = watchpoint->next;
1145 }
1146 }
1147
1148 static void xscale_enable_breakpoints(struct target *target)
1149 {
1150 struct breakpoint *breakpoint = target->breakpoints;
1151
1152 /* set any pending breakpoints */
1153 while (breakpoint)
1154 {
1155 if (breakpoint->set == 0)
1156 xscale_set_breakpoint(target, breakpoint);
1157 breakpoint = breakpoint->next;
1158 }
1159 }
1160
/* Resume execution of a halted target.
 *
 * current:            nonzero = resume at the current PC, else at 'address'
 * handle_breakpoints: nonzero = transparently single-step over a
 *                     breakpoint located at the resume PC before resuming
 * debug_execution:    nonzero = resume for internal/debugger use; the
 *                     target state becomes TARGET_DEBUG_RUNNING and the
 *                     register cache is NOT invalidated
 *
 * The resume itself is performed by handing control back to the on-chip
 * debug handler: command 0x30 (or 0x31 preceded by 0x62 to clean the
 * trace buffer when tracing is enabled), followed by CPSR, r7..r0 and PC.
 */
static int xscale_resume(struct target *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct breakpoint *breakpoint = target->breakpoints;
	uint32_t current_pc;
	int retval;
	int i;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution)
	{
		target_free_all_working_areas(target);
	}

	/* update vector tables */
	if ((retval = xscale_update_vectors(target)) != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->pc->value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0)
	{
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		breakpoint = breakpoint_find(target,
				buf_get_u32(armv4_5->pc->value, 0, 32));
		if (breakpoint != NULL)
		{
			uint32_t next_pc;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
			{
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
			}

			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers
			 * NOTE(review): retval is assigned but never checked here */
			retval = xscale_restore_banked(target);

			/* send resume request (command 0x30 or 0x31)
			 * clean the trace buffer if it is to be enabled (0x62) */
			if (xscale->trace.buffer_enabled)
			{
				xscale_send_u32(target, 0x62);
				xscale_send_u32(target, 0x31);
			}
			else
				xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));

			/* send r7 down to r0; the handler expects them in this order */
			for (i = 7; i >= 0; i--)
			{
				/* send register */
				xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target,
					buf_get_u32(armv4_5->pc->value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
					buf_get_u32(armv4_5->pc->value, 0, 32));

			/* wait for and process debug entry (the single-step
			 * re-enters the debug handler immediately) */
			xscale_debug_entry(target);

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			/* put the stepped-over breakpoint back */
			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers
	 * NOTE(review): retval is assigned but never checked here */
	retval = xscale_restore_banked(target);

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	}
	else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* send r7 down to r0, matching the handler's expected order */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(armv4_5->pc->value, 0, 32));
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->pc->value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution)
	{
		/* registers are now invalid */
		register_cache_invalidate(armv4_5->core_cache);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}
1321
/* Perform the actual single step: compute the next PC by simulation,
 * arm IBCR0 for it, resume via the debug handler, and wait for the
 * resulting debug re-entry.
 *
 * Note: 'current', 'address' and 'handle_breakpoints' are not used in
 * this body — the caller (xscale_step) has already applied them before
 * delegating here.
 */
static int xscale_step_inner(struct target *target, int current,
		uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
	{
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
		return retval;

	/* restore banked registers */
	if ((retval = xscale_restore_banked(target)) != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
			return retval;
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
			return retval;
	}
	else
		if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
			return retval;

	/* send CPSR */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* send r7 down to r0, the order the debug handler expects */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->pc->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->pc->value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	/* wait for and process debug entry (IBCR0 match fires after
	 * exactly one instruction) */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1406
1407 static int xscale_step(struct target *target, int current,
1408 uint32_t address, int handle_breakpoints)
1409 {
1410 struct arm *armv4_5 = target_to_arm(target);
1411 struct breakpoint *breakpoint = NULL;
1412
1413 uint32_t current_pc;
1414 int retval;
1415
1416 if (target->state != TARGET_HALTED)
1417 {
1418 LOG_WARNING("target not halted");
1419 return ERROR_TARGET_NOT_HALTED;
1420 }
1421
1422 /* current = 1: continue on current pc, otherwise continue at <address> */
1423 if (!current)
1424 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1425
1426 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1427
1428 /* if we're at the reset vector, we have to simulate the step */
1429 if (current_pc == 0x0)
1430 {
1431 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1432 return retval;
1433 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1434
1435 target->debug_reason = DBG_REASON_SINGLESTEP;
1436 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1437
1438 return ERROR_OK;
1439 }
1440
1441 /* the front-end may request us not to handle breakpoints */
1442 if (handle_breakpoints)
1443 breakpoint = breakpoint_find(target,
1444 buf_get_u32(armv4_5->pc->value, 0, 32));
1445 if (breakpoint != NULL) {
1446 retval = xscale_unset_breakpoint(target, breakpoint);
1447 if (retval != ERROR_OK)
1448 return retval;
1449 }
1450
1451 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1452
1453 if (breakpoint)
1454 {
1455 xscale_set_breakpoint(target, breakpoint);
1456 }
1457
1458 LOG_DEBUG("target stepped");
1459
1460 return ERROR_OK;
1461
1462 }
1463
/* Assert SRST while arming the debug unit so that the core traps into
 * the debug handler when reset is released (Hold-Reset + Halt-Mode +
 * Trap-Reset in DCSR), then optionally request a halt.
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1505
/* Release SRST and bring the target up: reset all hardware break/watch
 * bookkeeping, reload the debug handler binary into the mini-icache,
 * and let the core run into (or through) the debug handler depending on
 * target->reset_halt.
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* after reset both IBCR comparators and both DBR registers are free */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	register_cache_invalidate(xscale->armv4_5_common.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, TAP_IDLE);
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		/* walk the handler image 32 bytes (one cache line) at a time */
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt)
		{
			uint32_t cache_line[8];
			unsigned i;

			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad a partial final cache line with 0xe1a08008
			 * (ARM "mov r8, r8", effectively a no-op) */
			for (; i < 32; i += 4)
			{
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		};

		/* load the low and high exception vector cache lines */
		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, TAP_IDLE);

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1639
/* Stub: per-register core reads are not implemented for XScale.
 * NOTE(review): this logs an error but still returns ERROR_OK, so
 * callers cannot detect the missing functionality — consider
 * returning a failure code once implemented callers are audited.
 */
static int xscale_read_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1647
/* Stub: per-register core writes are not implemented for XScale.
 * NOTE(review): logs an error yet returns ERROR_OK — the silent
 * success is indistinguishable from a real write to callers.
 */
static int xscale_write_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode, uint32_t value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1655
1656 static int xscale_full_context(struct target *target)
1657 {
1658 struct arm *armv4_5 = target_to_arm(target);
1659
1660 uint32_t *buffer;
1661
1662 int i, j;
1663
1664 LOG_DEBUG("-");
1665
1666 if (target->state != TARGET_HALTED)
1667 {
1668 LOG_WARNING("target not halted");
1669 return ERROR_TARGET_NOT_HALTED;
1670 }
1671
1672 buffer = malloc(4 * 8);
1673
1674 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1675 * we can't enter User mode on an XScale (unpredictable),
1676 * but User shares registers with SYS
1677 */
1678 for (i = 1; i < 7; i++)
1679 {
1680 enum arm_mode mode = armv4_5_number_to_mode(i);
1681 bool valid = true;
1682 struct reg *r;
1683
1684 if (mode == ARM_MODE_USR)
1685 continue;
1686
1687 /* check if there are invalid registers in the current mode
1688 */
1689 for (j = 0; valid && j <= 16; j++)
1690 {
1691 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1692 mode, j).valid)
1693 valid = false;
1694 }
1695 if (valid)
1696 continue;
1697
1698 /* request banked registers */
1699 xscale_send_u32(target, 0x0);
1700
1701 /* send CPSR for desired bank mode */
1702 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1703
1704 /* get banked registers: r8 to r14; and SPSR
1705 * except in USR/SYS mode
1706 */
1707 if (mode != ARM_MODE_SYS) {
1708 /* SPSR */
1709 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1710 mode, 16);
1711
1712 xscale_receive(target, buffer, 8);
1713
1714 buf_set_u32(r->value, 0, 32, buffer[7]);
1715 r->dirty = false;
1716 r->valid = true;
1717 } else {
1718 xscale_receive(target, buffer, 7);
1719 }
1720
1721 /* move data from buffer to register cache */
1722 for (j = 8; j <= 14; j++)
1723 {
1724 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1725 mode, j);
1726
1727 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1728 r->dirty = false;
1729 r->valid = true;
1730 }
1731 }
1732
1733 free(buffer);
1734
1735 return ERROR_OK;
1736 }
1737
/* Flush dirty banked registers from the register cache back to the
 * target, one processor mode at a time (FIQ, IRQ, SVC, ABT, UND, SYS).
 *
 * For each mode with at least one dirty register among r8..r14 (or a
 * dirty SPSR outside SYS), the whole bank is sent via debug handler
 * command 0x1 followed by the target CPSR for that mode.
 */
static int xscale_restore_banked(struct target *target)
{
	struct arm *armv4_5 = target_to_arm(target);

	int i, j;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * and check if any banked registers need to be written. Ignore
	 * USR mode (number 0) in favor of SYS; we can't enter User mode on
	 * an XScale (unpredictable), but they share all registers.
	 */
	for (i = 1; i < 7; i++)
	{
		enum arm_mode mode = armv4_5_number_to_mode(i);
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are dirty registers in this mode */
		for (j = 8; j <= 14; j++)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j).dirty)
				goto dirty;
		}

		/* if not USR/SYS, check if the SPSR needs to be written */
		if (mode != ARM_MODE_SYS)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16).dirty)
				goto dirty;
		}

		/* there's nothing to flush for this mode */
		continue;

dirty:
		/* command 0x1: "send banked registers" */
		xscale_send_u32(target, 0x1);

		/* send CPSR for desired mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* send r8 to r14/lr ... only FIQ needs more than r13..r14,
		 * but this protocol doesn't understand that nuance.
		 */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}

		/* send spsr if not in USR/SYS mode */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}
	}

	return ERROR_OK;
}
1810
1811 static int xscale_read_memory(struct target *target, uint32_t address,
1812 uint32_t size, uint32_t count, uint8_t *buffer)
1813 {
1814 struct xscale_common *xscale = target_to_xscale(target);
1815 uint32_t *buf32;
1816 uint32_t i;
1817 int retval;
1818
1819 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1820
1821 if (target->state != TARGET_HALTED)
1822 {
1823 LOG_WARNING("target not halted");
1824 return ERROR_TARGET_NOT_HALTED;
1825 }
1826
1827 /* sanitize arguments */
1828 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1829 return ERROR_INVALID_ARGUMENTS;
1830
1831 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1832 return ERROR_TARGET_UNALIGNED_ACCESS;
1833
1834 /* send memory read request (command 0x1n, n: access size) */
1835 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1836 return retval;
1837
1838 /* send base address for read request */
1839 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1840 return retval;
1841
1842 /* send number of requested data words */
1843 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1844 return retval;
1845
1846 /* receive data from target (count times 32-bit words in host endianness) */
1847 buf32 = malloc(4 * count);
1848 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1849 return retval;
1850
1851 /* extract data from host-endian buffer into byte stream */
1852 for (i = 0; i < count; i++)
1853 {
1854 switch (size)
1855 {
1856 case 4:
1857 target_buffer_set_u32(target, buffer, buf32[i]);
1858 buffer += 4;
1859 break;
1860 case 2:
1861 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1862 buffer += 2;
1863 break;
1864 case 1:
1865 *buffer++ = buf32[i] & 0xff;
1866 break;
1867 default:
1868 LOG_ERROR("invalid read size");
1869 return ERROR_INVALID_ARGUMENTS;
1870 }
1871 }
1872
1873 free(buf32);
1874
1875 /* examine DCSR, to see if Sticky Abort (SA) got set */
1876 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1877 return retval;
1878 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1879 {
1880 /* clear SA bit */
1881 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1882 return retval;
1883
1884 return ERROR_TARGET_DATA_ABORT;
1885 }
1886
1887 return ERROR_OK;
1888 }
1889
1890 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1891 uint32_t size, uint32_t count, uint8_t *buffer)
1892 {
1893 struct xscale_common *xscale = target_to_xscale(target);
1894
1895 /* with MMU inactive, there are only physical addresses */
1896 if (!xscale->armv4_5_mmu.mmu_enabled)
1897 return xscale_read_memory(target, address, size, count, buffer);
1898
1899 /** \todo: provide a non-stub implementation of this routine. */
1900 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1901 target_name(target), __func__);
1902 return ERROR_FAIL;
1903 }
1904
1905 static int xscale_write_memory(struct target *target, uint32_t address,
1906 uint32_t size, uint32_t count, uint8_t *buffer)
1907 {
1908 struct xscale_common *xscale = target_to_xscale(target);
1909 int retval;
1910
1911 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1912
1913 if (target->state != TARGET_HALTED)
1914 {
1915 LOG_WARNING("target not halted");
1916 return ERROR_TARGET_NOT_HALTED;
1917 }
1918
1919 /* sanitize arguments */
1920 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1921 return ERROR_INVALID_ARGUMENTS;
1922
1923 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1924 return ERROR_TARGET_UNALIGNED_ACCESS;
1925
1926 /* send memory write request (command 0x2n, n: access size) */
1927 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1928 return retval;
1929
1930 /* send base address for read request */
1931 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1932 return retval;
1933
1934 /* send number of requested data words to be written*/
1935 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1936 return retval;
1937
1938 /* extract data from host-endian buffer into byte stream */
1939 #if 0
1940 for (i = 0; i < count; i++)
1941 {
1942 switch (size)
1943 {
1944 case 4:
1945 value = target_buffer_get_u32(target, buffer);
1946 xscale_send_u32(target, value);
1947 buffer += 4;
1948 break;
1949 case 2:
1950 value = target_buffer_get_u16(target, buffer);
1951 xscale_send_u32(target, value);
1952 buffer += 2;
1953 break;
1954 case 1:
1955 value = *buffer;
1956 xscale_send_u32(target, value);
1957 buffer += 1;
1958 break;
1959 default:
1960 LOG_ERROR("should never get here");
1961 exit(-1);
1962 }
1963 }
1964 #endif
1965 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1966 return retval;
1967
1968 /* examine DCSR, to see if Sticky Abort (SA) got set */
1969 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1970 return retval;
1971 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1972 {
1973 /* clear SA bit */
1974 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1975 return retval;
1976
1977 return ERROR_TARGET_DATA_ABORT;
1978 }
1979
1980 return ERROR_OK;
1981 }
1982
1983 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1984 uint32_t size, uint32_t count, uint8_t *buffer)
1985 {
1986 struct xscale_common *xscale = target_to_xscale(target);
1987
1988 /* with MMU inactive, there are only physical addresses */
1989 if (!xscale->armv4_5_mmu.mmu_enabled)
1990 return xscale_read_memory(target, address, size, count, buffer);
1991
1992 /** \todo: provide a non-stub implementation of this routine. */
1993 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1994 target_name(target), __func__);
1995 return ERROR_FAIL;
1996 }
1997
/* Bulk write: on XScale this is just a word-sized write via the
 * normal debug-handler write path.
 */
static int xscale_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	const uint32_t word_size = 4;

	return xscale_write_memory(target, address, word_size, count, buffer);
}
2003
2004 static uint32_t xscale_get_ttb(struct target *target)
2005 {
2006 struct xscale_common *xscale = target_to_xscale(target);
2007 uint32_t ttb;
2008
2009 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2010 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2011
2012 return ttb;
2013 }
2014
2015 static void xscale_disable_mmu_caches(struct target *target, int mmu,
2016 int d_u_cache, int i_cache)
2017 {
2018 struct xscale_common *xscale = target_to_xscale(target);
2019 uint32_t cp15_control;
2020
2021 /* read cp15 control register */
2022 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2023 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2024
2025 if (mmu)
2026 cp15_control &= ~0x1U;
2027
2028 if (d_u_cache)
2029 {
2030 /* clean DCache */
2031 xscale_send_u32(target, 0x50);
2032 xscale_send_u32(target, xscale->cache_clean_address);
2033
2034 /* invalidate DCache */
2035 xscale_send_u32(target, 0x51);
2036
2037 cp15_control &= ~0x4U;
2038 }
2039
2040 if (i_cache)
2041 {
2042 /* invalidate ICache */
2043 xscale_send_u32(target, 0x52);
2044 cp15_control &= ~0x1000U;
2045 }
2046
2047 /* write new cp15 control register */
2048 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2049
2050 /* execute cpwait to ensure outstanding operations complete */
2051 xscale_send_u32(target, 0x53);
2052 }
2053
2054 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2055 int d_u_cache, int i_cache)
2056 {
2057 struct xscale_common *xscale = target_to_xscale(target);
2058 uint32_t cp15_control;
2059
2060 /* read cp15 control register */
2061 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2062 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2063
2064 if (mmu)
2065 cp15_control |= 0x1U;
2066
2067 if (d_u_cache)
2068 cp15_control |= 0x4U;
2069
2070 if (i_cache)
2071 cp15_control |= 0x1000U;
2072
2073 /* write new cp15 control register */
2074 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2075
2076 /* execute cpwait to ensure outstanding operations complete */
2077 xscale_send_u32(target, 0x53);
2078 }
2079
2080 static int xscale_set_breakpoint(struct target *target,
2081 struct breakpoint *breakpoint)
2082 {
2083 int retval;
2084 struct xscale_common *xscale = target_to_xscale(target);
2085
2086 if (target->state != TARGET_HALTED)
2087 {
2088 LOG_WARNING("target not halted");
2089 return ERROR_TARGET_NOT_HALTED;
2090 }
2091
2092 if (breakpoint->set)
2093 {
2094 LOG_WARNING("breakpoint already set");
2095 return ERROR_OK;
2096 }
2097
2098 if (breakpoint->type == BKPT_HARD)
2099 {
2100 uint32_t value = breakpoint->address | 1;
2101 if (!xscale->ibcr0_used)
2102 {
2103 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2104 xscale->ibcr0_used = 1;
2105 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2106 }
2107 else if (!xscale->ibcr1_used)
2108 {
2109 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2110 xscale->ibcr1_used = 1;
2111 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2112 }
2113 else
2114 {
2115 LOG_ERROR("BUG: no hardware comparator available");
2116 return ERROR_OK;
2117 }
2118 }
2119 else if (breakpoint->type == BKPT_SOFT)
2120 {
2121 if (breakpoint->length == 4)
2122 {
2123 /* keep the original instruction in target endianness */
2124 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2125 {
2126 return retval;
2127 }
2128 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2129 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2130 {
2131 return retval;
2132 }
2133 }
2134 else
2135 {
2136 /* keep the original instruction in target endianness */
2137 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2138 {
2139 return retval;
2140 }
2141 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2142 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2143 {
2144 return retval;
2145 }
2146 }
2147 breakpoint->set = 1;
2148 }
2149
2150 return ERROR_OK;
2151 }
2152
2153 static int xscale_add_breakpoint(struct target *target,
2154 struct breakpoint *breakpoint)
2155 {
2156 struct xscale_common *xscale = target_to_xscale(target);
2157
2158 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2159 {
2160 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2161 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2162 }
2163
2164 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2165 {
2166 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2167 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2168 }
2169
2170 if (breakpoint->type == BKPT_HARD)
2171 {
2172 xscale->ibcr_available--;
2173 }
2174
2175 return ERROR_OK;
2176 }
2177
2178 static int xscale_unset_breakpoint(struct target *target,
2179 struct breakpoint *breakpoint)
2180 {
2181 int retval;
2182 struct xscale_common *xscale = target_to_xscale(target);
2183
2184 if (target->state != TARGET_HALTED)
2185 {
2186 LOG_WARNING("target not halted");
2187 return ERROR_TARGET_NOT_HALTED;
2188 }
2189
2190 if (!breakpoint->set)
2191 {
2192 LOG_WARNING("breakpoint not set");
2193 return ERROR_OK;
2194 }
2195
2196 if (breakpoint->type == BKPT_HARD)
2197 {
2198 if (breakpoint->set == 1)
2199 {
2200 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2201 xscale->ibcr0_used = 0;
2202 }
2203 else if (breakpoint->set == 2)
2204 {
2205 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2206 xscale->ibcr1_used = 0;
2207 }
2208 breakpoint->set = 0;
2209 }
2210 else
2211 {
2212 /* restore original instruction (kept in target endianness) */
2213 if (breakpoint->length == 4)
2214 {
2215 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2216 {
2217 return retval;
2218 }
2219 }
2220 else
2221 {
2222 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2223 {
2224 return retval;
2225 }
2226 }
2227 breakpoint->set = 0;
2228 }
2229
2230 return ERROR_OK;
2231 }
2232
2233 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2234 {
2235 struct xscale_common *xscale = target_to_xscale(target);
2236
2237 if (target->state != TARGET_HALTED)
2238 {
2239 LOG_WARNING("target not halted");
2240 return ERROR_TARGET_NOT_HALTED;
2241 }
2242
2243 if (breakpoint->set)
2244 {
2245 xscale_unset_breakpoint(target, breakpoint);
2246 }
2247
2248 if (breakpoint->type == BKPT_HARD)
2249 xscale->ibcr_available++;
2250
2251 return ERROR_OK;
2252 }
2253
2254 static int xscale_set_watchpoint(struct target *target,
2255 struct watchpoint *watchpoint)
2256 {
2257 struct xscale_common *xscale = target_to_xscale(target);
2258 uint8_t enable = 0;
2259 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2260 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2261
2262 if (target->state != TARGET_HALTED)
2263 {
2264 LOG_WARNING("target not halted");
2265 return ERROR_TARGET_NOT_HALTED;
2266 }
2267
2268 xscale_get_reg(dbcon);
2269
2270 switch (watchpoint->rw)
2271 {
2272 case WPT_READ:
2273 enable = 0x3;
2274 break;
2275 case WPT_ACCESS:
2276 enable = 0x2;
2277 break;
2278 case WPT_WRITE:
2279 enable = 0x1;
2280 break;
2281 default:
2282 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2283 }
2284
2285 if (!xscale->dbr0_used)
2286 {
2287 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2288 dbcon_value |= enable;
2289 xscale_set_reg_u32(dbcon, dbcon_value);
2290 watchpoint->set = 1;
2291 xscale->dbr0_used = 1;
2292 }
2293 else if (!xscale->dbr1_used)
2294 {
2295 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2296 dbcon_value |= enable << 2;
2297 xscale_set_reg_u32(dbcon, dbcon_value);
2298 watchpoint->set = 2;
2299 xscale->dbr1_used = 1;
2300 }
2301 else
2302 {
2303 LOG_ERROR("BUG: no hardware comparator available");
2304 return ERROR_OK;
2305 }
2306
2307 return ERROR_OK;
2308 }
2309
2310 static int xscale_add_watchpoint(struct target *target,
2311 struct watchpoint *watchpoint)
2312 {
2313 struct xscale_common *xscale = target_to_xscale(target);
2314
2315 if (xscale->dbr_available < 1)
2316 {
2317 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2318 }
2319
2320 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2321 {
2322 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2323 }
2324
2325 xscale->dbr_available--;
2326
2327 return ERROR_OK;
2328 }
2329
2330 static int xscale_unset_watchpoint(struct target *target,
2331 struct watchpoint *watchpoint)
2332 {
2333 struct xscale_common *xscale = target_to_xscale(target);
2334 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2335 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2336
2337 if (target->state != TARGET_HALTED)
2338 {
2339 LOG_WARNING("target not halted");
2340 return ERROR_TARGET_NOT_HALTED;
2341 }
2342
2343 if (!watchpoint->set)
2344 {
2345 LOG_WARNING("breakpoint not set");
2346 return ERROR_OK;
2347 }
2348
2349 if (watchpoint->set == 1)
2350 {
2351 dbcon_value &= ~0x3;
2352 xscale_set_reg_u32(dbcon, dbcon_value);
2353 xscale->dbr0_used = 0;
2354 }
2355 else if (watchpoint->set == 2)
2356 {
2357 dbcon_value &= ~0xc;
2358 xscale_set_reg_u32(dbcon, dbcon_value);
2359 xscale->dbr1_used = 0;
2360 }
2361 watchpoint->set = 0;
2362
2363 return ERROR_OK;
2364 }
2365
2366 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2367 {
2368 struct xscale_common *xscale = target_to_xscale(target);
2369
2370 if (target->state != TARGET_HALTED)
2371 {
2372 LOG_WARNING("target not halted");
2373 return ERROR_TARGET_NOT_HALTED;
2374 }
2375
2376 if (watchpoint->set)
2377 {
2378 xscale_unset_watchpoint(target, watchpoint);
2379 }
2380
2381 xscale->dbr_available++;
2382
2383 return ERROR_OK;
2384 }
2385
2386 static int xscale_get_reg(struct reg *reg)
2387 {
2388 struct xscale_reg *arch_info = reg->arch_info;
2389 struct target *target = arch_info->target;
2390 struct xscale_common *xscale = target_to_xscale(target);
2391
2392 /* DCSR, TX and RX are accessible via JTAG */
2393 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2394 {
2395 return xscale_read_dcsr(arch_info->target);
2396 }
2397 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2398 {
2399 /* 1 = consume register content */
2400 return xscale_read_tx(arch_info->target, 1);
2401 }
2402 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2403 {
2404 /* can't read from RX register (host -> debug handler) */
2405 return ERROR_OK;
2406 }
2407 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2408 {
2409 /* can't (explicitly) read from TXRXCTRL register */
2410 return ERROR_OK;
2411 }
2412 else /* Other DBG registers have to be transfered by the debug handler */
2413 {
2414 /* send CP read request (command 0x40) */
2415 xscale_send_u32(target, 0x40);
2416
2417 /* send CP register number */
2418 xscale_send_u32(target, arch_info->dbg_handler_number);
2419
2420 /* read register value */
2421 xscale_read_tx(target, 1);
2422 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2423
2424 reg->dirty = 0;
2425 reg->valid = 1;
2426 }
2427
2428 return ERROR_OK;
2429 }
2430
2431 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2432 {
2433 struct xscale_reg *arch_info = reg->arch_info;
2434 struct target *target = arch_info->target;
2435 struct xscale_common *xscale = target_to_xscale(target);
2436 uint32_t value = buf_get_u32(buf, 0, 32);
2437
2438 /* DCSR, TX and RX are accessible via JTAG */
2439 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2440 {
2441 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2442 return xscale_write_dcsr(arch_info->target, -1, -1);
2443 }
2444 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2445 {
2446 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2447 return xscale_write_rx(arch_info->target);
2448 }
2449 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2450 {
2451 /* can't write to TX register (debug-handler -> host) */
2452 return ERROR_OK;
2453 }
2454 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2455 {
2456 /* can't (explicitly) write to TXRXCTRL register */
2457 return ERROR_OK;
2458 }
2459 else /* Other DBG registers have to be transfered by the debug handler */
2460 {
2461 /* send CP write request (command 0x41) */
2462 xscale_send_u32(target, 0x41);
2463
2464 /* send CP register number */
2465 xscale_send_u32(target, arch_info->dbg_handler_number);
2466
2467 /* send CP register value */
2468 xscale_send_u32(target, value);
2469 buf_set_u32(reg->value, 0, 32, value);
2470 }
2471
2472 return ERROR_OK;
2473 }
2474
2475 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2476 {
2477 struct xscale_common *xscale = target_to_xscale(target);
2478 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2479 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2480
2481 /* send CP write request (command 0x41) */
2482 xscale_send_u32(target, 0x41);
2483
2484 /* send CP register number */
2485 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2486
2487 /* send CP register value */
2488 xscale_send_u32(target, value);
2489 buf_set_u32(dcsr->value, 0, 32, value);
2490
2491 return ERROR_OK;
2492 }
2493
2494 static int xscale_read_trace(struct target *target)
2495 {
2496 struct xscale_common *xscale = target_to_xscale(target);
2497 struct arm *armv4_5 = &xscale->armv4_5_common;
2498 struct xscale_trace_data **trace_data_p;
2499
2500 /* 258 words from debug handler
2501 * 256 trace buffer entries
2502 * 2 checkpoint addresses
2503 */
2504 uint32_t trace_buffer[258];
2505 int is_address[256];
2506 int i, j;
2507
2508 if (target->state != TARGET_HALTED)
2509 {
2510 LOG_WARNING("target must be stopped to read trace data");
2511 return ERROR_TARGET_NOT_HALTED;
2512 }
2513
2514 /* send read trace buffer command (command 0x61) */
2515 xscale_send_u32(target, 0x61);
2516
2517 /* receive trace buffer content */
2518 xscale_receive(target, trace_buffer, 258);
2519
2520 /* parse buffer backwards to identify address entries */
2521 for (i = 255; i >= 0; i--)
2522 {
2523 is_address[i] = 0;
2524 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2525 ((trace_buffer[i] & 0xf0) == 0xd0))
2526 {
2527 if (i >= 3)
2528 is_address[--i] = 1;
2529 if (i >= 2)
2530 is_address[--i] = 1;
2531 if (i >= 1)
2532 is_address[--i] = 1;
2533 if (i >= 0)
2534 is_address[--i] = 1;
2535 }
2536 }
2537
2538
2539 /* search first non-zero entry */
2540 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2541 ;
2542
2543 if (j == 256)
2544 {
2545 LOG_DEBUG("no trace data collected");
2546 return ERROR_XSCALE_NO_TRACE_DATA;
2547 }
2548
2549 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2550 ;
2551
2552 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2553 (*trace_data_p)->next = NULL;
2554 (*trace_data_p)->chkpt0 = trace_buffer[256];
2555 (*trace_data_p)->chkpt1 = trace_buffer[257];
2556 (*trace_data_p)->last_instruction =
2557 buf_get_u32(armv4_5->pc->value, 0, 32);
2558 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2559 (*trace_data_p)->depth = 256 - j;
2560
2561 for (i = j; i < 256; i++)
2562 {
2563 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2564 if (is_address[i])
2565 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2566 else
2567 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2568 }
2569
2570 return ERROR_OK;
2571 }
2572
2573 static int xscale_read_instruction(struct target *target,
2574 struct arm_instruction *instruction)
2575 {
2576 struct xscale_common *xscale = target_to_xscale(target);
2577 int i;
2578 int section = -1;
2579 size_t size_read;
2580 uint32_t opcode;
2581 int retval;
2582
2583 if (!xscale->trace.image)
2584 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2585
2586 /* search for the section the current instruction belongs to */
2587 for (i = 0; i < xscale->trace.image->num_sections; i++)
2588 {
2589 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2590 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2591 {
2592 section = i;
2593 break;
2594 }
2595 }
2596
2597 if (section == -1)
2598 {
2599 /* current instruction couldn't be found in the image */
2600 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2601 }
2602
2603 if (xscale->trace.core_state == ARM_STATE_ARM)
2604 {
2605 uint8_t buf[4];
2606 if ((retval = image_read_section(xscale->trace.image, section,
2607 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2608 4, buf, &size_read)) != ERROR_OK)
2609 {
2610 LOG_ERROR("error while reading instruction: %i", retval);
2611 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2612 }
2613 opcode = target_buffer_get_u32(target, buf);
2614 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2615 }
2616 else if (xscale->trace.core_state == ARM_STATE_THUMB)
2617 {
2618 uint8_t buf[2];
2619 if ((retval = image_read_section(xscale->trace.image, section,
2620 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2621 2, buf, &size_read)) != ERROR_OK)
2622 {
2623 LOG_ERROR("error while reading instruction: %i", retval);
2624 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2625 }
2626 opcode = target_buffer_get_u16(target, buf);
2627 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2628 }
2629 else
2630 {
2631 LOG_ERROR("BUG: unknown core state encountered");
2632 exit(-1);
2633 }
2634
2635 return ERROR_OK;
2636 }
2637
2638 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2639 int i, uint32_t *target)
2640 {
2641 /* if there are less than four entries prior to the indirect branch message
2642 * we can't extract the address */
2643 if (i < 4)
2644 {
2645 return -1;
2646 }
2647
2648 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2649 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2650
2651 return 0;
2652 }
2653
2654 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2655 {
2656 struct xscale_common *xscale = target_to_xscale(target);
2657 int next_pc_ok = 0;
2658 uint32_t next_pc = 0x0;
2659 struct xscale_trace_data *trace_data = xscale->trace.data;
2660 int retval;
2661
2662 while (trace_data)
2663 {
2664 int i, chkpt;
2665 int rollover;
2666 int branch;
2667 int exception;
2668 xscale->trace.core_state = ARM_STATE_ARM;
2669
2670 chkpt = 0;
2671 rollover = 0;
2672
2673 for (i = 0; i < trace_data->depth; i++)
2674 {
2675 next_pc_ok = 0;
2676 branch = 0;
2677 exception = 0;
2678
2679 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2680 continue;
2681
2682 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2683 {
2684 case 0: /* Exceptions */
2685 case 1:
2686 case 2:
2687 case 3:
2688 case 4:
2689 case 5:
2690 case 6:
2691 case 7:
2692 exception = (trace_data->entries[i].data & 0x70) >> 4;
2693 next_pc_ok = 1;
2694 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2695 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2696 break;
2697 case 8: /* Direct Branch */
2698 branch = 1;
2699 break;
2700 case 9: /* Indirect Branch */
2701 branch = 1;
2702 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2703 {
2704 next_pc_ok = 1;
2705 }
2706 break;
2707 case 13: /* Checkpointed Indirect Branch */
2708 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2709 {
2710 next_pc_ok = 1;
2711 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2712 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2713 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2714 }
2715 /* explicit fall-through */
2716 case 12: /* Checkpointed Direct Branch */
2717 branch = 1;
2718 if (chkpt == 0)
2719 {
2720 next_pc_ok = 1;
2721 next_pc = trace_data->chkpt0;
2722 chkpt++;
2723 }
2724 else if (chkpt == 1)
2725 {
2726 next_pc_ok = 1;
2727 next_pc = trace_data->chkpt0;
2728 chkpt++;
2729 }
2730 else
2731 {
2732 LOG_WARNING("more than two checkpointed branches encountered");
2733 }
2734 break;
2735 case 15: /* Roll-over */
2736 rollover++;
2737 continue;
2738 default: /* Reserved */
2739 command_print(cmd_ctx, "--- reserved trace message ---");
2740 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2741 return ERROR_OK;
2742 }
2743
2744 if (xscale->trace.pc_ok)
2745 {
2746 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2747 struct arm_instruction instruction;
2748
2749 if ((exception == 6) || (exception == 7))
2750 {
2751 /* IRQ or FIQ exception, no instruction executed */
2752 executed -= 1;
2753 }
2754
2755 while (executed-- >= 0)
2756 {
2757 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2758 {
2759 /* can't continue tracing with no image available */
2760 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2761 {
2762 return retval;
2763 }
2764 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2765 {
2766 /* TODO: handle incomplete images */
2767 }
2768 }
2769
2770 /* a precise abort on a load to the PC is included in the incremental
2771 * word count, other instructions causing data aborts are not included
2772 */
2773 if ((executed == 0) && (exception == 4)
2774 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2775 {
2776 if ((instruction.type == ARM_LDM)
2777 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2778 {
2779 executed--;
2780 }
2781 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2782 && (instruction.info.load_store.Rd != 15))
2783 {
2784 executed--;
2785 }
2786 }
2787
2788 /* only the last instruction executed
2789 * (the one that caused the control flow change)
2790 * could be a taken branch
2791 */
2792 if (((executed == -1) && (branch == 1)) &&
2793 (((instruction.type == ARM_B) ||
2794 (instruction.type == ARM_BL) ||
2795 (instruction.type == ARM_BLX)) &&
2796 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2797 {
2798 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2799 }
2800 else
2801 {
2802 xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2;
2803 }
2804 command_print(cmd_ctx, "%s", instruction.text);
2805 }
2806
2807 rollover = 0;
2808 }
2809
2810 if (next_pc_ok)
2811 {
2812 xscale->trace.current_pc = next_pc;
2813 xscale->trace.pc_ok = 1;
2814 }
2815 }
2816
2817 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2)
2818 {
2819 struct arm_instruction instruction;
2820 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2821 {
2822 /* can't continue tracing with no image available */
2823 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2824 {
2825 return retval;
2826 }
2827 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2828 {
2829 /* TODO: handle incomplete images */
2830 }
2831 }
2832 command_print(cmd_ctx, "%s", instruction.text);
2833 }
2834
2835 trace_data = trace_data->next;
2836 }
2837
2838 return ERROR_OK;
2839 }
2840
/* Accessors shared by all cached XScale debug registers. */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2845
2846 static void xscale_build_reg_cache(struct target *target)
2847 {
2848 struct xscale_common *xscale = target_to_xscale(target);
2849 struct arm *armv4_5 = &xscale->armv4_5_common;
2850 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2851 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2852 int i;
2853 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2854
2855 (*cache_p) = arm_build_reg_cache(target, armv4_5);
2856
2857 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2858 cache_p = &(*cache_p)->next;
2859
2860 /* fill in values for the xscale reg cache */
2861 (*cache_p)->name = "XScale registers";
2862 (*cache_p)->next = NULL;
2863 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2864 (*cache_p)->num_regs = num_regs;
2865
2866 for (i = 0; i < num_regs; i++)
2867 {
2868 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2869 (*cache_p)->reg_list[i].value = calloc(4, 1);
2870 (*cache_p)->reg_list[i].dirty = 0;
2871 (*cache_p)->reg_list[i].valid = 0;
2872 (*cache_p)->reg_list[i].size = 32;
2873 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2874 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2875 arch_info[i] = xscale_reg_arch_info[i];
2876 arch_info[i].target = target;
2877 }
2878
2879 xscale->reg_cache = (*cache_p);
2880 }
2881
2882 static int xscale_init_target(struct command_context *cmd_ctx,
2883 struct target *target)
2884 {
2885 xscale_build_reg_cache(target);
2886 return ERROR_OK;
2887 }
2888
2889 static int xscale_init_arch_info(struct target *target,
2890 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2891 {
2892 struct arm *armv4_5;
2893 uint32_t high_reset_branch, low_reset_branch;
2894 int i;
2895
2896 armv4_5 = &xscale->armv4_5_common;
2897
2898 /* store architecture specfic data */
2899 xscale->common_magic = XSCALE_COMMON_MAGIC;
2900
2901 /* we don't really *need* a variant param ... */
2902 if (variant) {
2903 int ir_length = 0;
2904
2905 if (strcmp(variant, "pxa250") == 0
2906 || strcmp(variant, "pxa255") == 0
2907 || strcmp(variant, "pxa26x") == 0)
2908 ir_length = 5;
2909 else if (strcmp(variant, "pxa27x") == 0
2910 || strcmp(variant, "ixp42x") == 0
2911 || strcmp(variant, "ixp45x") == 0
2912 || strcmp(variant, "ixp46x") == 0)
2913 ir_length = 7;
2914 else if (strcmp(variant, "pxa3xx") == 0)
2915 ir_length = 11;
2916 else
2917 LOG_WARNING("%s: unrecognized variant %s",
2918 tap->dotted_name, variant);
2919
2920 if (ir_length && ir_length != tap->ir_length) {
2921 LOG_WARNING("%s: IR length for %s is %d; fixing",
2922 tap->dotted_name, variant, ir_length);
2923 tap->ir_length = ir_length;
2924 }
2925 }
2926
2927 /* PXA3xx shifts the JTAG instructions */
2928 if (tap->ir_length == 11)
2929 xscale->xscale_variant = XSCALE_PXA3XX;
2930 else
2931 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
2932
2933 /* the debug handler isn't installed (and thus not running) at this time */
2934 xscale->handler_address = 0xfe000800;
2935
2936 /* clear the vectors we keep locally for reference */
2937 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2938 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2939
2940 /* no user-specified vectors have been configured yet */
2941 xscale->static_low_vectors_set = 0x0;
2942 xscale->static_high_vectors_set = 0x0;
2943
2944 /* calculate branches to debug handler */
2945 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2946 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2947
2948 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2949 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2950
2951 for (i = 1; i <= 7; i++)
2952 {
2953 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2954 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2955 }
2956
2957 /* 64kB aligned region used for DCache cleaning */
2958 xscale->cache_clean_address = 0xfffe0000;
2959
2960 xscale->hold_rst = 0;
2961 xscale->external_debug_break = 0;
2962
2963 xscale->ibcr_available = 2;
2964 xscale->ibcr0_used = 0;
2965 xscale->ibcr1_used = 0;
2966
2967 xscale->dbr_available = 2;
2968 xscale->dbr0_used = 0;
2969 xscale->dbr1_used = 0;
2970
2971 LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
2972 target_name(target));
2973
2974 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2975 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2976
2977 xscale->vector_catch = 0x1;
2978
2979 xscale->trace.capture_status = TRACE_IDLE;
2980 xscale->trace.data = NULL;
2981 xscale->trace.image = NULL;
2982 xscale->trace.buffer_enabled = 0;
2983 xscale->trace.buffer_fill = 0;
2984
2985 /* prepare ARMv4/5 specific information */
2986 armv4_5->arch_info = xscale;
2987 armv4_5->read_core_reg = xscale_read_core_reg;
2988 armv4_5->write_core_reg = xscale_write_core_reg;
2989 armv4_5->full_context = xscale_full_context;
2990
2991 arm_init_arch_info(target, armv4_5);
2992
2993 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
2994 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
2995 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
2996 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
2997 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
2998 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
2999 xscale->armv4_5_mmu.has_tiny_pages = 1;
3000 xscale->armv4_5_mmu.mmu_enabled = 0;
3001
3002 return ERROR_OK;
3003 }
3004
3005 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3006 {
3007 struct xscale_common *xscale;
3008
3009 if (sizeof xscale_debug_handler - 1 > 0x800) {
3010 LOG_ERROR("debug_handler.bin: larger than 2kb");
3011 return ERROR_FAIL;
3012 }
3013
3014 xscale = calloc(1, sizeof(*xscale));
3015 if (!xscale)
3016 return ERROR_FAIL;
3017
3018 return xscale_init_arch_info(target, xscale, target->tap,
3019 target->variant);
3020 }
3021
3022 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3023 {
3024 struct target *target = NULL;
3025 struct xscale_common *xscale;
3026 int retval;
3027 uint32_t handler_address;
3028
3029 if (CMD_ARGC < 2)
3030 {
3031 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3032 return ERROR_OK;
3033 }
3034
3035 if ((target = get_target(CMD_ARGV[0])) == NULL)
3036 {
3037 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3038 return ERROR_FAIL;
3039 }
3040
3041 xscale = target_to_xscale(target);
3042 retval = xscale_verify_pointer(CMD_CTX, xscale);
3043 if (retval != ERROR_OK)
3044 return retval;
3045
3046 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3047
3048 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3049 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3050 {
3051 xscale->handler_address = handler_address;
3052 }
3053 else
3054 {
3055 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3056 return ERROR_FAIL;
3057 }
3058
3059 return ERROR_OK;
3060 }
3061
3062 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3063 {
3064 struct target *target = NULL;
3065 struct xscale_common *xscale;
3066 int retval;
3067 uint32_t cache_clean_address;
3068
3069 if (CMD_ARGC < 2)
3070 {
3071 return ERROR_COMMAND_SYNTAX_ERROR;
3072 }
3073
3074 target = get_target(CMD_ARGV[0]);
3075 if (target == NULL)
3076 {
3077 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3078 return ERROR_FAIL;
3079 }
3080 xscale = target_to_xscale(target);
3081 retval = xscale_verify_pointer(CMD_CTX, xscale);
3082 if (retval != ERROR_OK)
3083 return retval;
3084
3085 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3086
3087 if (cache_clean_address & 0xffff)
3088 {
3089 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3090 }
3091 else
3092 {
3093 xscale->cache_clean_address = cache_clean_address;
3094 }
3095
3096 return ERROR_OK;
3097 }
3098
3099 COMMAND_HANDLER(xscale_handle_cache_info_command)
3100 {
3101 struct target *target = get_current_target(CMD_CTX);
3102 struct xscale_common *xscale = target_to_xscale(target);
3103 int retval;
3104
3105 retval = xscale_verify_pointer(CMD_CTX, xscale);
3106 if (retval != ERROR_OK)
3107 return retval;
3108
3109 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3110 }
3111
3112 static int xscale_virt2phys(struct target *target,
3113 uint32_t virtual, uint32_t *physical)
3114 {
3115 struct xscale_common *xscale = target_to_xscale(target);
3116 int type;
3117 uint32_t cb;
3118 int domain;
3119 uint32_t ap;
3120
3121 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3122 LOG_ERROR(xscale_not);
3123 return ERROR_TARGET_INVALID;
3124 }
3125
3126 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3127 if (type == -1)
3128 {
3129 return ret;
3130 }
3131 *physical = ret;
3132 return ERROR_OK;
3133 }
3134
3135 static int xscale_mmu(struct target *target, int *enabled)
3136 {
3137 struct xscale_common *xscale = target_to_xscale(target);
3138
3139 if (target->state != TARGET_HALTED)
3140 {
3141 LOG_ERROR("Target not halted");
3142 return ERROR_TARGET_INVALID;
3143 }
3144 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3145 return ERROR_OK;
3146 }
3147
3148 COMMAND_HANDLER(xscale_handle_mmu_command)
3149 {
3150 struct target *target = get_current_target(CMD_CTX);
3151 struct xscale_common *xscale = target_to_xscale(target);
3152 int retval;
3153
3154 retval = xscale_verify_pointer(CMD_CTX, xscale);
3155 if (retval != ERROR_OK)
3156 return retval;
3157
3158 if (target->state != TARGET_HALTED)
3159 {
3160 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3161 return ERROR_OK;
3162 }
3163
3164 if (CMD_ARGC >= 1)
3165 {
3166 bool enable;
3167 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3168 if (enable)
3169 xscale_enable_mmu_caches(target, 1, 0, 0);
3170 else
3171 xscale_disable_mmu_caches(target, 1, 0, 0);
3172 xscale->armv4_5_mmu.mmu_enabled = enable;
3173 }
3174
3175 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3176
3177 return ERROR_OK;
3178 }
3179
3180 COMMAND_HANDLER(xscale_handle_idcache_command)
3181 {
3182 struct target *target = get_current_target(CMD_CTX);
3183 struct xscale_common *xscale = target_to_xscale(target);
3184
3185 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3186 if (retval != ERROR_OK)
3187 return retval;
3188
3189 if (target->state != TARGET_HALTED)
3190 {
3191 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3192 return ERROR_OK;
3193 }
3194
3195 bool icache = false;
3196 if (strcmp(CMD_NAME, "icache") == 0)
3197 icache = true;
3198 if (CMD_ARGC >= 1)
3199 {
3200 bool enable;
3201 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3202 if (icache) {
3203 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3204 if (enable)
3205 xscale_enable_mmu_caches(target, 0, 0, 1);
3206 else
3207 xscale_disable_mmu_caches(target, 0, 0, 1);
3208 } else {
3209 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3210 if (enable)
3211 xscale_enable_mmu_caches(target, 0, 1, 0);
3212 else
3213 xscale_disable_mmu_caches(target, 0, 1, 0);
3214 }
3215 }
3216
3217 bool enabled = icache ?
3218 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3219 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3220 const char *msg = enabled ? "enabled" : "disabled";
3221 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3222
3223 return ERROR_OK;
3224 }
3225
3226 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3227 {
3228 struct target *target = get_current_target(CMD_CTX);
3229 struct xscale_common *xscale = target_to_xscale(target);
3230 int retval;
3231
3232 retval = xscale_verify_pointer(CMD_CTX, xscale);
3233 if (retval != ERROR_OK)
3234 return retval;
3235
3236 if (CMD_ARGC < 1)
3237 {
3238 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3239 }
3240 else
3241 {
3242 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3243 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3244 xscale_write_dcsr(target, -1, -1);
3245 }
3246
3247 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3248
3249 return ERROR_OK;
3250 }
3251
3252
3253 COMMAND_HANDLER(xscale_handle_vector_table_command)
3254 {
3255 struct target *target = get_current_target(CMD_CTX);
3256 struct xscale_common *xscale = target_to_xscale(target);
3257 int err = 0;
3258 int retval;
3259
3260 retval = xscale_verify_pointer(CMD_CTX, xscale);
3261 if (retval != ERROR_OK)
3262 return retval;
3263
3264 if (CMD_ARGC == 0) /* print current settings */
3265 {
3266 int idx;
3267
3268 command_print(CMD_CTX, "active user-set static vectors:");
3269 for (idx = 1; idx < 8; idx++)
3270 if (xscale->static_low_vectors_set & (1 << idx))
3271 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3272 for (idx = 1; idx < 8; idx++)
3273 if (xscale->static_high_vectors_set & (1 << idx))
3274 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3275 return ERROR_OK;
3276 }
3277
3278 if (CMD_ARGC != 3)
3279 err = 1;
3280 else
3281 {
3282 int idx;
3283 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3284 uint32_t vec;
3285 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3286
3287 if (idx < 1 || idx >= 8)
3288 err = 1;
3289
3290 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3291 {
3292 xscale->static_low_vectors_set |= (1<<idx);
3293 xscale->static_low_vectors[idx] = vec;
3294 }
3295 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3296 {
3297 xscale->static_high_vectors_set |= (1<<idx);
3298 xscale->static_high_vectors[idx] = vec;
3299 }
3300 else
3301 err = 1;
3302 }
3303
3304 if (err)
3305 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3306
3307 return ERROR_OK;
3308 }
3309
3310
3311 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3312 {
3313 struct target *target = get_current_target(CMD_CTX);
3314 struct xscale_common *xscale = target_to_xscale(target);
3315 struct arm *armv4_5 = &xscale->armv4_5_common;
3316 uint32_t dcsr_value;
3317 int retval;
3318
3319 retval = xscale_verify_pointer(CMD_CTX, xscale);
3320 if (retval != ERROR_OK)
3321 return retval;
3322
3323 if (target->state != TARGET_HALTED)
3324 {
3325 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3326 return ERROR_OK;
3327 }
3328
3329 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3330 {
3331 struct xscale_trace_data *td, *next_td;
3332 xscale->trace.buffer_enabled = 1;
3333
3334 /* free old trace data */
3335 td = xscale->trace.data;
3336 while (td)
3337 {
3338 next_td = td->next;
3339
3340 if (td->entries)
3341 free(td->entries);
3342 free(td);
3343 td = next_td;
3344 }
3345 xscale->trace.data = NULL;
3346 }
3347 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3348 {
3349 xscale->trace.buffer_enabled = 0;
3350 }
3351
3352 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3353 {
3354 uint32_t fill = 1;
3355 if (CMD_ARGC >= 3)
3356 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3357 xscale->trace.buffer_fill = fill;
3358 }
3359 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3360 {
3361 xscale->trace.buffer_fill = -1;
3362 }
3363
3364 if (xscale->trace.buffer_enabled)
3365 {
3366 /* if we enable the trace buffer in fill-once
3367 * mode we know the address of the first instruction */
3368 xscale->trace.pc_ok = 1;
3369 xscale->trace.current_pc =
3370 buf_get_u32(armv4_5->pc->value, 0, 32);
3371 }
3372 else
3373 {
3374 /* otherwise the address is unknown, and we have no known good PC */
3375 xscale->trace.pc_ok = 0;
3376 }
3377
3378 command_print(CMD_CTX, "trace buffer %s (%s)",
3379 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3380 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3381
3382 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3383 if (xscale->trace.buffer_fill >= 0)
3384 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3385 else
3386 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3387
3388 return ERROR_OK;
3389 }
3390
3391 COMMAND_HANDLER(xscale_handle_trace_image_command)
3392 {
3393 struct target *target = get_current_target(CMD_CTX);
3394 struct xscale_common *xscale = target_to_xscale(target);
3395 int retval;
3396
3397 if (CMD_ARGC < 1)
3398 {
3399 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3400 return ERROR_OK;
3401 }
3402
3403 retval = xscale_verify_pointer(CMD_CTX, xscale);
3404 if (retval != ERROR_OK)
3405 return retval;
3406
3407 if (xscale->trace.image)
3408 {
3409 image_close(xscale->trace.image);
3410 free(xscale->trace.image);
3411 command_print(CMD_CTX, "previously loaded image found and closed");
3412 }
3413
3414 xscale->trace.image = malloc(sizeof(struct image));
3415 xscale->trace.image->base_address_set = 0;
3416 xscale->trace.image->start_address_set = 0;
3417
3418 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3419 if (CMD_ARGC >= 2)
3420 {
3421 xscale->trace.image->base_address_set = 1;
3422 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3423 }
3424 else
3425 {
3426 xscale->trace.image->base_address_set = 0;
3427 }
3428
3429 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3430 {
3431 free(xscale->trace.image);
3432 xscale->trace.image = NULL;
3433 return ERROR_OK;
3434 }
3435
3436 return ERROR_OK;
3437 }
3438
3439 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3440 {
3441 struct target *target = get_current_target(CMD_CTX);
3442 struct xscale_common *xscale = target_to_xscale(target);
3443 struct xscale_trace_data *trace_data;
3444 struct fileio file;
3445 int retval;
3446
3447 retval = xscale_verify_pointer(CMD_CTX, xscale);
3448 if (retval != ERROR_OK)
3449 return retval;
3450
3451 if (target->state != TARGET_HALTED)
3452 {
3453 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3454 return ERROR_OK;
3455 }
3456
3457 if (CMD_ARGC < 1)
3458 {
3459 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3460 return ERROR_OK;
3461 }
3462
3463 trace_data = xscale->trace.data;
3464
3465 if (!trace_data)
3466 {
3467 command_print(CMD_CTX, "no trace data collected");
3468 return ERROR_OK;
3469 }
3470
3471 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3472 {
3473 return ERROR_OK;
3474 }
3475
3476 while (trace_data)
3477 {
3478 int i;
3479
3480 fileio_write_u32(&file, trace_data->chkpt0);
3481 fileio_write_u32(&file, trace_data->chkpt1);
3482 fileio_write_u32(&file, trace_data->last_instruction);
3483 fileio_write_u32(&file, trace_data->depth);
3484
3485 for (i = 0; i < trace_data->depth; i++)
3486 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3487
3488 trace_data = trace_data->next;
3489 }
3490
3491 fileio_close(&file);
3492
3493 return ERROR_OK;
3494 }
3495
3496 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3497 {
3498 struct target *target = get_current_target(CMD_CTX);
3499 struct xscale_common *xscale = target_to_xscale(target);
3500 int retval;
3501
3502 retval = xscale_verify_pointer(CMD_CTX, xscale);
3503 if (retval != ERROR_OK)
3504 return retval;
3505
3506 xscale_analyze_trace(target, CMD_CTX);
3507
3508 return ERROR_OK;
3509 }
3510
3511 COMMAND_HANDLER(xscale_handle_cp15)
3512 {
3513 struct target *target = get_current_target(CMD_CTX);
3514 struct xscale_common *xscale = target_to_xscale(target);
3515 int retval;
3516
3517 retval = xscale_verify_pointer(CMD_CTX, xscale);
3518 if (retval != ERROR_OK)
3519 return retval;
3520
3521 if (target->state != TARGET_HALTED)
3522 {
3523 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3524 return ERROR_OK;
3525 }
3526 uint32_t reg_no = 0;
3527 struct reg *reg = NULL;
3528 if (CMD_ARGC > 0)
3529 {
3530 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3531 /*translate from xscale cp15 register no to openocd register*/
3532 switch (reg_no)
3533 {
3534 case 0:
3535 reg_no = XSCALE_MAINID;
3536 break;
3537 case 1:
3538 reg_no = XSCALE_CTRL;
3539 break;
3540 case 2:
3541 reg_no = XSCALE_TTB;
3542 break;
3543 case 3:
3544 reg_no = XSCALE_DAC;
3545 break;
3546 case 5:
3547 reg_no = XSCALE_FSR;
3548 break;
3549 case 6:
3550 reg_no = XSCALE_FAR;
3551 break;
3552 case 13:
3553 reg_no = XSCALE_PID;
3554 break;
3555 case 15:
3556 reg_no = XSCALE_CPACCESS;
3557 break;
3558 default:
3559 command_print(CMD_CTX, "invalid register number");
3560 return ERROR_INVALID_ARGUMENTS;
3561 }
3562 reg = &xscale->reg_cache->reg_list[reg_no];
3563
3564 }
3565 if (CMD_ARGC == 1)
3566 {
3567 uint32_t value;
3568
3569 /* read cp15 control register */
3570 xscale_get_reg(reg);
3571 value = buf_get_u32(reg->value, 0, 32);
3572 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3573 }
3574 else if (CMD_ARGC == 2)
3575 {
3576 uint32_t value;
3577 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3578
3579 /* send CP write request (command 0x41) */
3580 xscale_send_u32(target, 0x41);
3581
3582 /* send CP register number */
3583 xscale_send_u32(target, reg_no);
3584
3585 /* send CP register value */
3586 xscale_send_u32(target, value);
3587
3588 /* execute cpwait to ensure outstanding operations complete */
3589 xscale_send_u32(target, 0x53);
3590 }
3591 else
3592 {
3593 command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
3594 }
3595
3596 return ERROR_OK;
3597 }
3598
/* "xscale" subcommands that operate on a live target (COMMAND_EXEC mode). */
static const struct command_registration xscale_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = xscale_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about CPU caches",
	},
	{
		.name = "mmu",
		.handler = xscale_handle_mmu_command,
		.mode = COMMAND_EXEC,
		.help = "enable or disable the MMU",
		.usage = "['enable'|'disable']",
	},
	/* icache and dcache share one handler; it dispatches on CMD_NAME */
	{
		.name = "icache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display ICache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "dcache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display DCache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "vector_catch",
		.handler = xscale_handle_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "set or display 8-bit mask of vectors "
			"that should trigger debug entry",
		.usage = "[mask]",
	},
	{
		.name = "vector_table",
		.handler = xscale_handle_vector_table_command,
		.mode = COMMAND_EXEC,
		.help = "set vector table entry in mini-ICache, "
			"or display current tables",
		.usage = "[('high'|'low') index code]",
	},
	{
		.name = "trace_buffer",
		.handler = xscale_handle_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "display trace buffer status, enable or disable "
			"tracing, and optionally reconfigure trace mode",
		.usage = "['enable'|'disable' ['fill' number|'wrap']]",
	},
	{
		.name = "dump_trace",
		.handler = xscale_handle_dump_trace_command,
		.mode = COMMAND_EXEC,
		.help = "dump content of trace buffer to file",
		.usage = "filename",
	},
	{
		.name = "analyze_trace",
		.handler = xscale_handle_analyze_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "analyze content of trace buffer",
		.usage = "",
	},
	{
		.name = "trace_image",
		.handler = xscale_handle_trace_image_command,
		.mode = COMMAND_EXEC,
		.help = "load image from file to address (default 0)",
		.usage = "filename [offset [filetype]]",
	},
	{
		.name = "cp15",
		.handler = xscale_handle_cp15,
		.mode = COMMAND_EXEC,
		.help = "Read or write coprocessor 15 register.",
		.usage = "register [value]",
	},
	COMMAND_REGISTRATION_DONE
};
/* "xscale" subcommands usable in any mode (COMMAND_ANY), plus a chain
 * to the EXEC-only handlers above. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "target address",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "address",
	},
	{
		/* pull in the target-must-be-running commands as siblings */
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: generic ARM commands plus the
 * "xscale" command group defined in this file. */
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3715
/* Target driver vtable for Intel XScale cores. */
struct target_type xscale_target =
{
	.name = "xscale",

	/* state polling and status reporting */
	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	/* run control */
	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	/* reset handling; no soft reset-halt support */
	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* memory access, both virtual and physical */
	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,
	.bulk_write_memory = xscale_bulk_write_memory,

	/* checksum/blank-check via generic ARM algorithms */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* breakpoints and watchpoints */
	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	/* MMU support for virtual/physical address translation */
	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)