xscale: fix analyze_trace for trace data collected in wrap mode
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
41
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
74
75
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
78 *
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
83 */
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
86
/* Display names of the XScale debug-unit registers exposed through the
 * register cache.  The order mirrors xscale_reg_arch_info[] below, so
 * the /N/ index comments apply to both tables. */
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID",		/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",			/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",			/* 20 */
	"XSCALE_TXRXCTRL",
};
112
/* Per-register backing info, parallel to xscale_reg_list[] above.
 * Entries with a -1 number have no CP14/CP15 encoding and are reached
 * only through dedicated JTAG scan chains (see the trailing comments). */
static const struct xscale_reg xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
	{-1, NULL}, /* TX accessed via JTAG */
	{-1, NULL}, /* RX accessed via JTAG */
	{-1, NULL}, /* TXRXCTRL implicit access via JTAG */
};
138
/* Convenience wrapper: store a plain uint32_t into an XScale-specific
 * register through the generic byte-buffer interface. */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t value_buf[4];

	buf_set_u32(value_buf, 0, 32, value);
	return xscale_set_reg(reg, value_buf);
}
148
149 static const char xscale_not[] = "target is not an XScale";
150
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
153 {
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
157 }
158 return ERROR_OK;
159 }
160
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
162 {
163 if (tap == NULL)
164 return ERROR_FAIL;
165
166 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
167 {
168 struct scan_field field;
169 uint8_t scratch[4];
170
171 memset(&field, 0, sizeof field);
172 field.num_bits = tap->ir_length;
173 field.out_value = scratch;
174 buf_set_u32(scratch, 0, field.num_bits, new_instr);
175
176 jtag_add_ir_scan(tap, &field, end_state);
177 }
178
179 return ERROR_OK;
180 }
181
/* Read the XScale DCSR (Debug Control/Status Register) into the register
 * cache via the SELDCSR scan chain, then write the same value straight
 * back.
 *
 * The chain is 3 + 32 + 1 bits: a 3-bit control field carrying the
 * hold_rst and external_debug_break flags, the 32-bit DCSR value, and one
 * trailing bit.  Because the read pass also shifts in the control flags,
 * a second scan re-writes the DCSR value that was just captured.
 *
 * Returns ERROR_OK or a JTAG error code from jtag_execute_queue().
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant,
			TAP_DRPAUSE);

	/* bit 1 = hold_rst, bit 2 = external_debug_break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	/* cache now reflects the hardware value */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
243
244
245 static void xscale_getbuf(jtag_callback_data_t arg)
246 {
247 uint8_t *in = (uint8_t *)arg;
248 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
249 }
250
251 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
252 {
253 if (num_words == 0)
254 return ERROR_INVALID_ARGUMENTS;
255
256 struct xscale_common *xscale = target_to_xscale(target);
257 int retval = ERROR_OK;
258 tap_state_t path[3];
259 struct scan_field fields[3];
260 uint8_t *field0 = malloc(num_words * 1);
261 uint8_t field0_check_value = 0x2;
262 uint8_t field0_check_mask = 0x6;
263 uint32_t *field1 = malloc(num_words * 4);
264 uint8_t field2_check_value = 0x0;
265 uint8_t field2_check_mask = 0x1;
266 int words_done = 0;
267 int words_scheduled = 0;
268 int i;
269
270 path[0] = TAP_DRSELECT;
271 path[1] = TAP_DRCAPTURE;
272 path[2] = TAP_DRSHIFT;
273
274 memset(&fields, 0, sizeof fields);
275
276 fields[0].num_bits = 3;
277 fields[0].check_value = &field0_check_value;
278 fields[0].check_mask = &field0_check_mask;
279
280 fields[1].num_bits = 32;
281
282 fields[2].num_bits = 1;
283 fields[2].check_value = &field2_check_value;
284 fields[2].check_mask = &field2_check_mask;
285
286 xscale_jtag_set_instr(target->tap,
287 XSCALE_DBGTX << xscale->xscale_variant,
288 TAP_IDLE);
289 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
290
291 /* repeat until all words have been collected */
292 int attempts = 0;
293 while (words_done < num_words)
294 {
295 /* schedule reads */
296 words_scheduled = 0;
297 for (i = words_done; i < num_words; i++)
298 {
299 fields[0].in_value = &field0[i];
300
301 jtag_add_pathmove(3, path);
302
303 fields[1].in_value = (uint8_t *)(field1 + i);
304
305 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
306
307 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
308
309 words_scheduled++;
310 }
311
312 if ((retval = jtag_execute_queue()) != ERROR_OK)
313 {
314 LOG_ERROR("JTAG error while receiving data from debug handler");
315 break;
316 }
317
318 /* examine results */
319 for (i = words_done; i < num_words; i++)
320 {
321 if (!(field0[0] & 1))
322 {
323 /* move backwards if necessary */
324 int j;
325 for (j = i; j < num_words - 1; j++)
326 {
327 field0[j] = field0[j + 1];
328 field1[j] = field1[j + 1];
329 }
330 words_scheduled--;
331 }
332 }
333 if (words_scheduled == 0)
334 {
335 if (attempts++==1000)
336 {
337 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
338 retval = ERROR_TARGET_TIMEOUT;
339 break;
340 }
341 }
342
343 words_done += words_scheduled;
344 }
345
346 for (i = 0; i < num_words; i++)
347 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
348
349 free(field1);
350
351 return retval;
352 }
353
/* Read the debug handler's TX register over the DBGTX scan chain.
 *
 * Polls for up to ~1 second until TX holds valid data (bit 0 of the
 * 3-bit status field set); the 32-bit payload lands in the XSCALE_TX
 * register-cache entry.  With 'consume' nonzero the TAP goes straight
 * Capture-DR -> Shift-DR, which clears TX_READY; otherwise a detour
 * through Pause-DR leaves the register content intact (peek).
 *
 * Returns ERROR_OK when valid data was read,
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when TX was empty on a
 * non-consuming read, or ERROR_TARGET_TIMEOUT on JTAG error/timeout.
 */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGTX << xscale->xscale_variant,
			TAP_IDLE);

	/* consuming path: Capture-DR directly into Shift-DR */
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	/* non-consuming path: pass through Pause-DR before shifting */
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* 1 second polling budget */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* stop polling unless we are consuming and TX was still empty */
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* bit 0 of the status field is TX_READY: clear means no valid data */
	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
448
/* Write the cached XSCALE_RX value into the debug handler's RX register
 * over the DBGRX scan chain.
 *
 * Polls for up to ~1 second until the handler has consumed any previous
 * word (bit 0 of the status field goes low), then performs one final
 * scan with the rx_valid bit set to hand the new word over.
 *
 * Returns ERROR_OK, ERROR_TARGET_TIMEOUT, or a JTAG error code.
 */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant,
			TAP_IDLE);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* 1 second polling budget */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
530
/* send count elements of size byte to the debug handler */
/* Streams 'count' elements of 'size' bytes (1, 2 or 4) from 'buffer'
 * into the debug handler's RX register, honouring the target's
 * byte order.  All scans are queued first and executed in one batch;
 * unlike xscale_write_rx() there is no per-word rx_read handshake.
 * Returns ERROR_OK, ERROR_INVALID_ARGUMENTS for a bad 'size', or a
 * JTAG error code. */
static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t t[3];
	int bits[3];
	int retval;
	int done_count = 0;

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant,
			TAP_IDLE);

	/* DBGRX scan layout: 3 control bits, 32 data bits, 1 rx_valid bit */
	bits[0]=3;
	t[0]=0;
	bits[1]=32;
	t[2]=1;		/* rx_valid set: every word is flagged as new data */
	bits[2]=1;
	int endianness = target->endianness;
	while (done_count++ < count)
	{
		switch (size)
		{
		case 4:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u32(buffer);
			} else
			{
				t[1]=be_to_h_u32(buffer);
			}
			break;
		case 2:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u16(buffer);
			} else
			{
				t[1]=be_to_h_u16(buffer);
			}
			break;
		case 1:
			t[1]=buffer[0];
			break;
		default:
			LOG_ERROR("BUG: size neither 4, 2 nor 1");
			return ERROR_INVALID_ARGUMENTS;
		}
		jtag_add_dr_out(target->tap,
				3,
				bits,
				t,
				TAP_IDLE);
		buffer += size;
	}

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
595
596 static int xscale_send_u32(struct target *target, uint32_t value)
597 {
598 struct xscale_common *xscale = target_to_xscale(target);
599
600 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
601 return xscale_write_rx(target);
602 }
603
/* Write the cached DCSR value to the XScale over the SELDCSR chain,
 * together with the hold_rst / external_debug_break control bits.
 * Pass -1 for 'hold_rst' or 'ext_dbg_brk' to keep the stored flag
 * unchanged.  Returns ERROR_OK or a JTAG error code. */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant,
			TAP_IDLE);

	/* bit 1 = hold_rst, bit 2 = external_debug_break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* hardware and cache now agree */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
660
/* Parity of a 32-bit word: 0 if the number of set bits is even,
 * 1 if odd.  (Dead commented-out debug logging removed.) */
static unsigned int parity (unsigned int v)
{
	/* fold the word onto its low nibble; each XOR preserves parity */
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v &= 0xf;
	/* 0x6996 is a 16-entry parity lookup table: bit n = parity of n */
	return (0x6996 >> v) & 1;
}
672
/* Load one 8-word (32-byte) cache line into the mini-ICache at virtual
 * address 'va' using the LDIC JTAG instruction.
 *
 * The transfer is a 6-bit command plus 27-bit line address scan,
 * followed by eight scans of one 32-bit instruction word plus its
 * parity bit each.  Returns ERROR_OK or a JTAG error code. */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	xscale_jtag_set_instr(target->tap,
			XSCALE_LDIC << xscale->xscale_variant,
			TAP_IDLE);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;	/* reused to carry the parity bit */

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	}

	return jtag_execute_queue();
}
727
/* Invalidate the mini-ICache line containing virtual address 'va'
 * (LDIC command b000).  Note: the scan is only queued here; it is
 * executed by a later jtag_execute_queue() call elsewhere. */
static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	struct scan_field fields[2];

	xscale_jtag_set_instr(target->tap,
			XSCALE_LDIC << xscale->xscale_variant,
			TAP_IDLE);

	/* CMD for invalidate IC line b000, bits [6:4] b000 */
	buf_set_u32(&cmd, 0, 6, 0x0);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	return ERROR_OK;
}
757
758 static int xscale_update_vectors(struct target *target)
759 {
760 struct xscale_common *xscale = target_to_xscale(target);
761 int i;
762 int retval;
763
764 uint32_t low_reset_branch, high_reset_branch;
765
766 for (i = 1; i < 8; i++)
767 {
768 /* if there's a static vector specified for this exception, override */
769 if (xscale->static_high_vectors_set & (1 << i))
770 {
771 xscale->high_vectors[i] = xscale->static_high_vectors[i];
772 }
773 else
774 {
775 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
776 if (retval == ERROR_TARGET_TIMEOUT)
777 return retval;
778 if (retval != ERROR_OK)
779 {
780 /* Some of these reads will fail as part of normal execution */
781 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
782 }
783 }
784 }
785
786 for (i = 1; i < 8; i++)
787 {
788 if (xscale->static_low_vectors_set & (1 << i))
789 {
790 xscale->low_vectors[i] = xscale->static_low_vectors[i];
791 }
792 else
793 {
794 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
795 if (retval == ERROR_TARGET_TIMEOUT)
796 return retval;
797 if (retval != ERROR_OK)
798 {
799 /* Some of these reads will fail as part of normal execution */
800 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
801 }
802 }
803 }
804
805 /* calculate branches to debug handler */
806 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
807 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
808
809 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
810 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
811
812 /* invalidate and load exception vectors in mini i-cache */
813 xscale_invalidate_ic_line(target, 0x0);
814 xscale_invalidate_ic_line(target, 0xffff0000);
815
816 xscale_load_ic(target, 0x0, xscale->low_vectors);
817 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
818
819 return ERROR_OK;
820 }
821
822 static int xscale_arch_state(struct target *target)
823 {
824 struct xscale_common *xscale = target_to_xscale(target);
825 struct arm *armv4_5 = &xscale->armv4_5_common;
826
827 static const char *state[] =
828 {
829 "disabled", "enabled"
830 };
831
832 static const char *arch_dbg_reason[] =
833 {
834 "", "\n(processor reset)", "\n(trace buffer full)"
835 };
836
837 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
838 {
839 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
840 return ERROR_INVALID_ARGUMENTS;
841 }
842
843 arm_arch_state(target);
844 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
845 state[xscale->armv4_5_mmu.mmu_enabled],
846 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
847 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
848 arch_dbg_reason[xscale->arch_debug_reason]);
849
850 return ERROR_OK;
851 }
852
/* Periodic poll: while the core is (debug-)running, a non-consuming TX
 * read detects whether the debug handler has announced debug entry; if
 * so, run the full debug-entry sequence and fire halt events. */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
	{
		enum target_state previous_state = target->state;
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
		{

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		}
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		{
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be perfomed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
892
/* Process entry into debug state.
 *
 * Receives the register dump the debug handler streams out over TX
 * (r0, pc, r1-r7, cpsr, then the banked r8-r14 and spsr where the mode
 * has one), decodes the DCSR method-of-entry (MOE) field into a debug
 * reason, applies the stored-PC fixup, refreshes cached cache/MMU
 * state, and — when tracing is active — collects the trace buffer,
 * resuming if more fills are pending.
 *
 * Returns ERROR_OK, or an error from the JTAG/receive layer, or
 * ERROR_TARGET_FAILURE if the received cpsr is garbage. */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t pc;
	uint32_t buffer[10];
	int i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->pc->value, 0, 32, buffer[1]);
	armv4_5->pc->dirty = 1;
	armv4_5->pc->valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(armv4_5, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* an invalid mode bit pattern means the handler protocol broke down */
	if (!is_arm_mode(armv4_5->core_mode))
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
			arm_mode_name(armv4_5->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (armv4_5->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
		armv4_5->spsr->dirty = false;
		armv4_5->spsr->valid = true;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++)
	{
		struct reg *r = arm_reg_current(armv4_5, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* examine debug reason: MOE is DCSR bits [4:2] */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	/* every entry cause below applies the same -4 fixup to the stored PC */
	switch (moe)
	{
		case 0x0: /* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1: /* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2: /* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3: /* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4: /* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5: /* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6: /* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7: /* Reserved (may flag Hot-Debug support) */
		default:
			/* NOTE(review): exit() here tears down all of OpenOCD on a
			 * bogus MOE value; returning an error would be friendlier. */
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(armv4_5->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.buffer_enabled)
	{
		xscale_read_trace(target);
		xscale->trace.buffer_fill--;

		/* resume if we're still collecting trace data */
		if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
			&& (xscale->trace.buffer_fill > 0))
		{
			xscale_resume(target, 1, 0x0, 1, 0);
		}
		else
		{
			xscale->trace.buffer_enabled = 0;
		}
	}

	return ERROR_OK;
}
1062
1063 static int xscale_halt(struct target *target)
1064 {
1065 struct xscale_common *xscale = target_to_xscale(target);
1066
1067 LOG_DEBUG("target->state: %s",
1068 target_state_name(target));
1069
1070 if (target->state == TARGET_HALTED)
1071 {
1072 LOG_DEBUG("target was already halted");
1073 return ERROR_OK;
1074 }
1075 else if (target->state == TARGET_UNKNOWN)
1076 {
1077 /* this must not happen for a xscale target */
1078 LOG_ERROR("target was in unknown state when halt was requested");
1079 return ERROR_TARGET_INVALID;
1080 }
1081 else if (target->state == TARGET_RESET)
1082 {
1083 LOG_DEBUG("target->state == TARGET_RESET");
1084 }
1085 else
1086 {
1087 /* assert external dbg break */
1088 xscale->external_debug_break = 1;
1089 xscale_read_dcsr(target);
1090
1091 target->debug_reason = DBG_REASON_DBGRQ;
1092 }
1093
1094 return ERROR_OK;
1095 }
1096
1097 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1098 {
1099 struct xscale_common *xscale = target_to_xscale(target);
1100 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1101 int retval;
1102
1103 if (xscale->ibcr0_used)
1104 {
1105 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1106
1107 if (ibcr0_bp)
1108 {
1109 xscale_unset_breakpoint(target, ibcr0_bp);
1110 }
1111 else
1112 {
1113 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1114 exit(-1);
1115 }
1116 }
1117
1118 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1119 return retval;
1120
1121 return ERROR_OK;
1122 }
1123
1124 static int xscale_disable_single_step(struct target *target)
1125 {
1126 struct xscale_common *xscale = target_to_xscale(target);
1127 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1128 int retval;
1129
1130 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1131 return retval;
1132
1133 return ERROR_OK;
1134 }
1135
1136 static void xscale_enable_watchpoints(struct target *target)
1137 {
1138 struct watchpoint *watchpoint = target->watchpoints;
1139
1140 while (watchpoint)
1141 {
1142 if (watchpoint->set == 0)
1143 xscale_set_watchpoint(target, watchpoint);
1144 watchpoint = watchpoint->next;
1145 }
1146 }
1147
1148 static void xscale_enable_breakpoints(struct target *target)
1149 {
1150 struct breakpoint *breakpoint = target->breakpoints;
1151
1152 /* set any pending breakpoints */
1153 while (breakpoint)
1154 {
1155 if (breakpoint->set == 0)
1156 xscale_set_breakpoint(target, breakpoint);
1157 breakpoint = breakpoint->next;
1158 }
1159 }
1160
/* Resume execution on a halted XScale target.
 *
 * current = 1: resume at the current PC; otherwise resume at <address>.
 * handle_breakpoints != 0: if a breakpoint sits at the resume PC, it is
 * temporarily removed and single-stepped over before the real resume.
 * debug_execution != 0: resume quietly (state becomes TARGET_DEBUG_RUNNING
 * and TARGET_EVENT_DEBUG_RESUMED is fired instead of TARGET_EVENT_RESUMED).
 *
 * The resume itself talks to the on-chip debug handler: command 0x30/0x31
 * followed by CPSR, r0..r7 (sent in reverse order) and PC. Command 0x62
 * first cleans the trace buffer when tracing is enabled.
 */
static int xscale_resume(struct target *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct breakpoint *breakpoint = target->breakpoints;
	uint32_t current_pc;
	int retval;
	int i;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution)
	{
		target_free_all_working_areas(target);
	}

	/* update vector tables */
	if ((retval = xscale_update_vectors(target)) != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->pc->value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0)
	{
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		breakpoint = breakpoint_find(target,
				buf_get_u32(armv4_5->pc->value, 0, 32));
		if (breakpoint != NULL)
		{
			uint32_t next_pc;
			int saved_trace_buffer_enabled;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
			{
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
			}

			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			retval = xscale_restore_banked(target);

			/* send resume request */
			xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target,
					buf_get_u32(armv4_5->cpsr->value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
					buf_get_u32(armv4_5->cpsr->value, 0, 32));

			/* the debug handler expects r7 first, down to r0 */
			for (i = 7; i >= 0; i--)
			{
				/* send register */
				xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target,
					buf_get_u32(armv4_5->pc->value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
					buf_get_u32(armv4_5->pc->value, 0, 32));

			/* disable trace data collection in xscale_debug_entry() */
			saved_trace_buffer_enabled = xscale->trace.buffer_enabled;
			xscale->trace.buffer_enabled = 0;

			/* wait for and process debug entry */
			xscale_debug_entry(target);

			/* re-enable trace buffer, if enabled previously */
			xscale->trace.buffer_enabled = saved_trace_buffer_enabled;

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	retval = xscale_restore_banked(target);

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	}
	else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* send r7 down to r0, as the debug handler expects */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(armv4_5->pc->value, 0, 32));
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->pc->value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution)
	{
		/* registers are now invalid */
		register_cache_invalidate(armv4_5->core_cache);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}
1322
/* Execute exactly one instruction on the target.
 *
 * Assumes the caller (xscale_step) has already applied <current>/<address>
 * to the PC and removed any breakpoint at the step location; the parameters
 * are carried through unused here. The sequence mirrors xscale_resume:
 * arm IBCR0 at the next PC, send the resume command plus register state,
 * wait for re-entry into debug state, then disarm single-stepping.
 */
static int xscale_step_inner(struct target *target, int current,
		uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
	{
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
		return retval;

	/* restore banked registers */
	if ((retval = xscale_restore_banked(target)) != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
			return retval;
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
			return retval;
	}
	else
		if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
			return retval;

	/* send CPSR */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* send r7 down to r0, as the debug handler expects */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->pc->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->pc->value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	/* wait for and process debug entry */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1407
1408 static int xscale_step(struct target *target, int current,
1409 uint32_t address, int handle_breakpoints)
1410 {
1411 struct arm *armv4_5 = target_to_arm(target);
1412 struct breakpoint *breakpoint = NULL;
1413
1414 uint32_t current_pc;
1415 int retval;
1416
1417 if (target->state != TARGET_HALTED)
1418 {
1419 LOG_WARNING("target not halted");
1420 return ERROR_TARGET_NOT_HALTED;
1421 }
1422
1423 /* current = 1: continue on current pc, otherwise continue at <address> */
1424 if (!current)
1425 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1426
1427 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1428
1429 /* if we're at the reset vector, we have to simulate the step */
1430 if (current_pc == 0x0)
1431 {
1432 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1433 return retval;
1434 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1435
1436 target->debug_reason = DBG_REASON_SINGLESTEP;
1437 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1438
1439 return ERROR_OK;
1440 }
1441
1442 /* the front-end may request us not to handle breakpoints */
1443 if (handle_breakpoints)
1444 breakpoint = breakpoint_find(target,
1445 buf_get_u32(armv4_5->pc->value, 0, 32));
1446 if (breakpoint != NULL) {
1447 retval = xscale_unset_breakpoint(target, breakpoint);
1448 if (retval != ERROR_OK)
1449 return retval;
1450 }
1451
1452 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1453
1454 if (breakpoint)
1455 {
1456 xscale_set_breakpoint(target, breakpoint);
1457 }
1458
1459 LOG_DEBUG("target stepped");
1460
1461 return ERROR_OK;
1462
1463 }
1464
/* Assert SRST on the target while arranging for the core to trap into
 * debug state on the subsequent reset release.
 *
 * Programs DCSR (Hold Reset, bit 30; Trap Reset, bit 16) via the SELDCSR
 * JTAG instruction before pulling SRST, then optionally requests a halt
 * when reset_halt is configured.
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		  target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1506
/* Release SRST and reload the debug handler into the mini-icache.
 *
 * Invalidates all hardware breakpoint/watchpoint bookkeeping and the
 * register cache (their on-chip state was lost in reset), then downloads
 * the debug handler in 32-byte cache-line units plus the low/high vector
 * tables, and finally clears Hold Reset so the core starts running and
 * (via Trap Reset) enters the debug handler.
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* both IBCRs and both DBRs are free again after reset */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	register_cache_invalidate(xscale->armv4_5_common.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, TAP_IDLE);
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt)
		{
			uint32_t cache_line[8];
			unsigned i;

			/* one mini-icache line is 32 bytes (8 words) */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad a partial final line (0xe1a08008 = "mov r8, r8", a NOP) */
			for (; i < 32; i += 4)
			{
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		};

		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, TAP_IDLE);

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1640
/* Stub: individual core register reads are not supported; callers get
 * registers via the bulk transfer in debug entry instead. */
static int xscale_read_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1648
/* Stub: individual core register writes are not supported; registers are
 * flushed in bulk when the target is resumed. */
static int xscale_write_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode, uint32_t value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1656
1657 static int xscale_full_context(struct target *target)
1658 {
1659 struct arm *armv4_5 = target_to_arm(target);
1660
1661 uint32_t *buffer;
1662
1663 int i, j;
1664
1665 LOG_DEBUG("-");
1666
1667 if (target->state != TARGET_HALTED)
1668 {
1669 LOG_WARNING("target not halted");
1670 return ERROR_TARGET_NOT_HALTED;
1671 }
1672
1673 buffer = malloc(4 * 8);
1674
1675 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1676 * we can't enter User mode on an XScale (unpredictable),
1677 * but User shares registers with SYS
1678 */
1679 for (i = 1; i < 7; i++)
1680 {
1681 enum arm_mode mode = armv4_5_number_to_mode(i);
1682 bool valid = true;
1683 struct reg *r;
1684
1685 if (mode == ARM_MODE_USR)
1686 continue;
1687
1688 /* check if there are invalid registers in the current mode
1689 */
1690 for (j = 0; valid && j <= 16; j++)
1691 {
1692 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1693 mode, j).valid)
1694 valid = false;
1695 }
1696 if (valid)
1697 continue;
1698
1699 /* request banked registers */
1700 xscale_send_u32(target, 0x0);
1701
1702 /* send CPSR for desired bank mode */
1703 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1704
1705 /* get banked registers: r8 to r14; and SPSR
1706 * except in USR/SYS mode
1707 */
1708 if (mode != ARM_MODE_SYS) {
1709 /* SPSR */
1710 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1711 mode, 16);
1712
1713 xscale_receive(target, buffer, 8);
1714
1715 buf_set_u32(r->value, 0, 32, buffer[7]);
1716 r->dirty = false;
1717 r->valid = true;
1718 } else {
1719 xscale_receive(target, buffer, 7);
1720 }
1721
1722 /* move data from buffer to register cache */
1723 for (j = 8; j <= 14; j++)
1724 {
1725 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1726 mode, j);
1727
1728 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1729 r->dirty = false;
1730 r->valid = true;
1731 }
1732 }
1733
1734 free(buffer);
1735
1736 return ERROR_OK;
1737 }
1738
/* Write back any dirty banked registers (and SPSRs) to the target.
 *
 * For each processor mode with at least one dirty register, sends command
 * 0x1 ("send banked registers") followed by the mode CPSR, r8..r14, and —
 * for every mode except SYS — the SPSR. Must be called before resuming,
 * since the debug handler only restores what is explicitly sent.
 */
static int xscale_restore_banked(struct target *target)
{
	struct arm *armv4_5 = target_to_arm(target);

	int i, j;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * and check if any banked registers need to be written. Ignore
	 * USR mode (number 0) in favor of SYS; we can't enter User mode on
	 * an XScale (unpredictable), but they share all registers.
	 */
	for (i = 1; i < 7; i++)
	{
		enum arm_mode mode = armv4_5_number_to_mode(i);
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are dirty registers in this mode */
		for (j = 8; j <= 14; j++)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j).dirty)
				goto dirty;
		}

		/* if not USR/SYS, check if the SPSR needs to be written */
		if (mode != ARM_MODE_SYS)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16).dirty)
				goto dirty;
		}

		/* there's nothing to flush for this mode */
		continue;

dirty:
		/* command 0x1: "send banked registers" */
		xscale_send_u32(target, 0x1);

		/* send CPSR for desired mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* send r8 to r14/lr ... only FIQ needs more than r13..r14,
		 * but this protocol doesn't understand that nuance.
		 */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}

		/* send spsr if not in USR/SYS mode */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}
	}

	return ERROR_OK;
}
1811
1812 static int xscale_read_memory(struct target *target, uint32_t address,
1813 uint32_t size, uint32_t count, uint8_t *buffer)
1814 {
1815 struct xscale_common *xscale = target_to_xscale(target);
1816 uint32_t *buf32;
1817 uint32_t i;
1818 int retval;
1819
1820 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1821
1822 if (target->state != TARGET_HALTED)
1823 {
1824 LOG_WARNING("target not halted");
1825 return ERROR_TARGET_NOT_HALTED;
1826 }
1827
1828 /* sanitize arguments */
1829 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1830 return ERROR_INVALID_ARGUMENTS;
1831
1832 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1833 return ERROR_TARGET_UNALIGNED_ACCESS;
1834
1835 /* send memory read request (command 0x1n, n: access size) */
1836 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1837 return retval;
1838
1839 /* send base address for read request */
1840 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1841 return retval;
1842
1843 /* send number of requested data words */
1844 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1845 return retval;
1846
1847 /* receive data from target (count times 32-bit words in host endianness) */
1848 buf32 = malloc(4 * count);
1849 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1850 return retval;
1851
1852 /* extract data from host-endian buffer into byte stream */
1853 for (i = 0; i < count; i++)
1854 {
1855 switch (size)
1856 {
1857 case 4:
1858 target_buffer_set_u32(target, buffer, buf32[i]);
1859 buffer += 4;
1860 break;
1861 case 2:
1862 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1863 buffer += 2;
1864 break;
1865 case 1:
1866 *buffer++ = buf32[i] & 0xff;
1867 break;
1868 default:
1869 LOG_ERROR("invalid read size");
1870 return ERROR_INVALID_ARGUMENTS;
1871 }
1872 }
1873
1874 free(buf32);
1875
1876 /* examine DCSR, to see if Sticky Abort (SA) got set */
1877 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1878 return retval;
1879 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1880 {
1881 /* clear SA bit */
1882 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1883 return retval;
1884
1885 return ERROR_TARGET_DATA_ABORT;
1886 }
1887
1888 return ERROR_OK;
1889 }
1890
1891 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1892 uint32_t size, uint32_t count, uint8_t *buffer)
1893 {
1894 struct xscale_common *xscale = target_to_xscale(target);
1895
1896 /* with MMU inactive, there are only physical addresses */
1897 if (!xscale->armv4_5_mmu.mmu_enabled)
1898 return xscale_read_memory(target, address, size, count, buffer);
1899
1900 /** \todo: provide a non-stub implementation of this routine. */
1901 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1902 target_name(target), __func__);
1903 return ERROR_FAIL;
1904 }
1905
1906 static int xscale_write_memory(struct target *target, uint32_t address,
1907 uint32_t size, uint32_t count, uint8_t *buffer)
1908 {
1909 struct xscale_common *xscale = target_to_xscale(target);
1910 int retval;
1911
1912 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1913
1914 if (target->state != TARGET_HALTED)
1915 {
1916 LOG_WARNING("target not halted");
1917 return ERROR_TARGET_NOT_HALTED;
1918 }
1919
1920 /* sanitize arguments */
1921 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1922 return ERROR_INVALID_ARGUMENTS;
1923
1924 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1925 return ERROR_TARGET_UNALIGNED_ACCESS;
1926
1927 /* send memory write request (command 0x2n, n: access size) */
1928 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1929 return retval;
1930
1931 /* send base address for read request */
1932 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1933 return retval;
1934
1935 /* send number of requested data words to be written*/
1936 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1937 return retval;
1938
1939 /* extract data from host-endian buffer into byte stream */
1940 #if 0
1941 for (i = 0; i < count; i++)
1942 {
1943 switch (size)
1944 {
1945 case 4:
1946 value = target_buffer_get_u32(target, buffer);
1947 xscale_send_u32(target, value);
1948 buffer += 4;
1949 break;
1950 case 2:
1951 value = target_buffer_get_u16(target, buffer);
1952 xscale_send_u32(target, value);
1953 buffer += 2;
1954 break;
1955 case 1:
1956 value = *buffer;
1957 xscale_send_u32(target, value);
1958 buffer += 1;
1959 break;
1960 default:
1961 LOG_ERROR("should never get here");
1962 exit(-1);
1963 }
1964 }
1965 #endif
1966 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1967 return retval;
1968
1969 /* examine DCSR, to see if Sticky Abort (SA) got set */
1970 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1971 return retval;
1972 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1973 {
1974 /* clear SA bit */
1975 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1976 return retval;
1977
1978 return ERROR_TARGET_DATA_ABORT;
1979 }
1980
1981 return ERROR_OK;
1982 }
1983
1984 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1985 uint32_t size, uint32_t count, uint8_t *buffer)
1986 {
1987 struct xscale_common *xscale = target_to_xscale(target);
1988
1989 /* with MMU inactive, there are only physical addresses */
1990 if (!xscale->armv4_5_mmu.mmu_enabled)
1991 return xscale_read_memory(target, address, size, count, buffer);
1992
1993 /** \todo: provide a non-stub implementation of this routine. */
1994 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1995 target_name(target), __func__);
1996 return ERROR_FAIL;
1997 }
1998
/* Bulk write: <count> 32-bit words to <address>; thin wrapper that
 * forwards to xscale_write_memory() with a fixed access size of 4. */
static int xscale_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}
2004
2005 static uint32_t xscale_get_ttb(struct target *target)
2006 {
2007 struct xscale_common *xscale = target_to_xscale(target);
2008 uint32_t ttb;
2009
2010 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2011 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2012
2013 return ttb;
2014 }
2015
2016 static void xscale_disable_mmu_caches(struct target *target, int mmu,
2017 int d_u_cache, int i_cache)
2018 {
2019 struct xscale_common *xscale = target_to_xscale(target);
2020 uint32_t cp15_control;
2021
2022 /* read cp15 control register */
2023 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2024 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2025
2026 if (mmu)
2027 cp15_control &= ~0x1U;
2028
2029 if (d_u_cache)
2030 {
2031 /* clean DCache */
2032 xscale_send_u32(target, 0x50);
2033 xscale_send_u32(target, xscale->cache_clean_address);
2034
2035 /* invalidate DCache */
2036 xscale_send_u32(target, 0x51);
2037
2038 cp15_control &= ~0x4U;
2039 }
2040
2041 if (i_cache)
2042 {
2043 /* invalidate ICache */
2044 xscale_send_u32(target, 0x52);
2045 cp15_control &= ~0x1000U;
2046 }
2047
2048 /* write new cp15 control register */
2049 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2050
2051 /* execute cpwait to ensure outstanding operations complete */
2052 xscale_send_u32(target, 0x53);
2053 }
2054
2055 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2056 int d_u_cache, int i_cache)
2057 {
2058 struct xscale_common *xscale = target_to_xscale(target);
2059 uint32_t cp15_control;
2060
2061 /* read cp15 control register */
2062 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2063 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2064
2065 if (mmu)
2066 cp15_control |= 0x1U;
2067
2068 if (d_u_cache)
2069 cp15_control |= 0x4U;
2070
2071 if (i_cache)
2072 cp15_control |= 0x1000U;
2073
2074 /* write new cp15 control register */
2075 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2076
2077 /* execute cpwait to ensure outstanding operations complete */
2078 xscale_send_u32(target, 0x53);
2079 }
2080
/* Install a breakpoint on the target.
 *
 * BKPT_HARD uses one of the two IBCR comparators (bit 0 of the register
 * enables it); breakpoint->set records which comparator (1 or 2) backs it.
 * BKPT_SOFT saves the original instruction and patches in an ARM or Thumb
 * BKPT opcode, then flushes DCache and ICache so the core fetches it.
 *
 * NOTE(review): when both IBCRs are in use this logs a BUG but still
 * returns ERROR_OK, so the caller cannot tell the breakpoint was not
 * actually set — consider returning an error code; verify callers first.
 */
static int xscale_set_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set)
	{
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		/* bit 0 = comparator enable */
		uint32_t value = breakpoint->address | 1;
		if (!xscale->ibcr0_used)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
			xscale->ibcr0_used = 1;
			breakpoint->set = 1;	/* breakpoint set on first breakpoint register */
		}
		else if (!xscale->ibcr1_used)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
			xscale->ibcr1_used = 1;
			breakpoint->set = 2;	/* breakpoint set on second breakpoint register */
		}
		else
		{
			LOG_ERROR("BUG: no hardware comparator available");
			return ERROR_OK;
		}
	}
	else if (breakpoint->type == BKPT_SOFT)
	{
		/* length 4 = ARM instruction, length 2 = Thumb instruction */
		if (breakpoint->length == 4)
		{
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
			/* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
			if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
			{
				return retval;
			}
		}
		else
		{
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
			/* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
			if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
			{
				return retval;
			}
		}
		breakpoint->set = 1;

		/* make the patched opcode visible to instruction fetches */
		xscale_send_u32(target, 0x50);	/* clean dcache */
		xscale_send_u32(target, xscale->cache_clean_address);
		xscale_send_u32(target, 0x51);	/* invalidate dcache */
		xscale_send_u32(target, 0x52);	/* invalidate icache and flush fetch buffers */
	}

	return ERROR_OK;
}
2158
2159 static int xscale_add_breakpoint(struct target *target,
2160 struct breakpoint *breakpoint)
2161 {
2162 struct xscale_common *xscale = target_to_xscale(target);
2163
2164 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2165 {
2166 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2167 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2168 }
2169
2170 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2171 {
2172 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2173 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2174 }
2175
2176 if (breakpoint->type == BKPT_HARD)
2177 {
2178 xscale->ibcr_available--;
2179 }
2180
2181 return ERROR_OK;
2182 }
2183
2184 static int xscale_unset_breakpoint(struct target *target,
2185 struct breakpoint *breakpoint)
2186 {
2187 int retval;
2188 struct xscale_common *xscale = target_to_xscale(target);
2189
2190 if (target->state != TARGET_HALTED)
2191 {
2192 LOG_WARNING("target not halted");
2193 return ERROR_TARGET_NOT_HALTED;
2194 }
2195
2196 if (!breakpoint->set)
2197 {
2198 LOG_WARNING("breakpoint not set");
2199 return ERROR_OK;
2200 }
2201
2202 if (breakpoint->type == BKPT_HARD)
2203 {
2204 if (breakpoint->set == 1)
2205 {
2206 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2207 xscale->ibcr0_used = 0;
2208 }
2209 else if (breakpoint->set == 2)
2210 {
2211 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2212 xscale->ibcr1_used = 0;
2213 }
2214 breakpoint->set = 0;
2215 }
2216 else
2217 {
2218 /* restore original instruction (kept in target endianness) */
2219 if (breakpoint->length == 4)
2220 {
2221 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2222 {
2223 return retval;
2224 }
2225 }
2226 else
2227 {
2228 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2229 {
2230 return retval;
2231 }
2232 }
2233 breakpoint->set = 0;
2234
2235 xscale_send_u32(target, 0x50); /* clean dcache */
2236 xscale_send_u32(target, xscale->cache_clean_address);
2237 xscale_send_u32(target, 0x51); /* invalidate dcache */
2238 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2239 }
2240
2241 return ERROR_OK;
2242 }
2243
2244 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2245 {
2246 struct xscale_common *xscale = target_to_xscale(target);
2247
2248 if (target->state != TARGET_HALTED)
2249 {
2250 LOG_WARNING("target not halted");
2251 return ERROR_TARGET_NOT_HALTED;
2252 }
2253
2254 if (breakpoint->set)
2255 {
2256 xscale_unset_breakpoint(target, breakpoint);
2257 }
2258
2259 if (breakpoint->type == BKPT_HARD)
2260 xscale->ibcr_available++;
2261
2262 return ERROR_OK;
2263 }
2264
2265 static int xscale_set_watchpoint(struct target *target,
2266 struct watchpoint *watchpoint)
2267 {
2268 struct xscale_common *xscale = target_to_xscale(target);
2269 uint8_t enable = 0;
2270 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2271 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2272
2273 if (target->state != TARGET_HALTED)
2274 {
2275 LOG_WARNING("target not halted");
2276 return ERROR_TARGET_NOT_HALTED;
2277 }
2278
2279 xscale_get_reg(dbcon);
2280
2281 switch (watchpoint->rw)
2282 {
2283 case WPT_READ:
2284 enable = 0x3;
2285 break;
2286 case WPT_ACCESS:
2287 enable = 0x2;
2288 break;
2289 case WPT_WRITE:
2290 enable = 0x1;
2291 break;
2292 default:
2293 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2294 }
2295
2296 if (!xscale->dbr0_used)
2297 {
2298 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2299 dbcon_value |= enable;
2300 xscale_set_reg_u32(dbcon, dbcon_value);
2301 watchpoint->set = 1;
2302 xscale->dbr0_used = 1;
2303 }
2304 else if (!xscale->dbr1_used)
2305 {
2306 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2307 dbcon_value |= enable << 2;
2308 xscale_set_reg_u32(dbcon, dbcon_value);
2309 watchpoint->set = 2;
2310 xscale->dbr1_used = 1;
2311 }
2312 else
2313 {
2314 LOG_ERROR("BUG: no hardware comparator available");
2315 return ERROR_OK;
2316 }
2317
2318 return ERROR_OK;
2319 }
2320
2321 static int xscale_add_watchpoint(struct target *target,
2322 struct watchpoint *watchpoint)
2323 {
2324 struct xscale_common *xscale = target_to_xscale(target);
2325
2326 if (xscale->dbr_available < 1)
2327 {
2328 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2329 }
2330
2331 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2332 {
2333 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2334 }
2335
2336 xscale->dbr_available--;
2337
2338 return ERROR_OK;
2339 }
2340
2341 static int xscale_unset_watchpoint(struct target *target,
2342 struct watchpoint *watchpoint)
2343 {
2344 struct xscale_common *xscale = target_to_xscale(target);
2345 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2346 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2347
2348 if (target->state != TARGET_HALTED)
2349 {
2350 LOG_WARNING("target not halted");
2351 return ERROR_TARGET_NOT_HALTED;
2352 }
2353
2354 if (!watchpoint->set)
2355 {
2356 LOG_WARNING("breakpoint not set");
2357 return ERROR_OK;
2358 }
2359
2360 if (watchpoint->set == 1)
2361 {
2362 dbcon_value &= ~0x3;
2363 xscale_set_reg_u32(dbcon, dbcon_value);
2364 xscale->dbr0_used = 0;
2365 }
2366 else if (watchpoint->set == 2)
2367 {
2368 dbcon_value &= ~0xc;
2369 xscale_set_reg_u32(dbcon, dbcon_value);
2370 xscale->dbr1_used = 0;
2371 }
2372 watchpoint->set = 0;
2373
2374 return ERROR_OK;
2375 }
2376
2377 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2378 {
2379 struct xscale_common *xscale = target_to_xscale(target);
2380
2381 if (target->state != TARGET_HALTED)
2382 {
2383 LOG_WARNING("target not halted");
2384 return ERROR_TARGET_NOT_HALTED;
2385 }
2386
2387 if (watchpoint->set)
2388 {
2389 xscale_unset_watchpoint(target, watchpoint);
2390 }
2391
2392 xscale->dbr_available++;
2393
2394 return ERROR_OK;
2395 }
2396
2397 static int xscale_get_reg(struct reg *reg)
2398 {
2399 struct xscale_reg *arch_info = reg->arch_info;
2400 struct target *target = arch_info->target;
2401 struct xscale_common *xscale = target_to_xscale(target);
2402
2403 /* DCSR, TX and RX are accessible via JTAG */
2404 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2405 {
2406 return xscale_read_dcsr(arch_info->target);
2407 }
2408 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2409 {
2410 /* 1 = consume register content */
2411 return xscale_read_tx(arch_info->target, 1);
2412 }
2413 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2414 {
2415 /* can't read from RX register (host -> debug handler) */
2416 return ERROR_OK;
2417 }
2418 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2419 {
2420 /* can't (explicitly) read from TXRXCTRL register */
2421 return ERROR_OK;
2422 }
2423 else /* Other DBG registers have to be transfered by the debug handler */
2424 {
2425 /* send CP read request (command 0x40) */
2426 xscale_send_u32(target, 0x40);
2427
2428 /* send CP register number */
2429 xscale_send_u32(target, arch_info->dbg_handler_number);
2430
2431 /* read register value */
2432 xscale_read_tx(target, 1);
2433 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2434
2435 reg->dirty = 0;
2436 reg->valid = 1;
2437 }
2438
2439 return ERROR_OK;
2440 }
2441
2442 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2443 {
2444 struct xscale_reg *arch_info = reg->arch_info;
2445 struct target *target = arch_info->target;
2446 struct xscale_common *xscale = target_to_xscale(target);
2447 uint32_t value = buf_get_u32(buf, 0, 32);
2448
2449 /* DCSR, TX and RX are accessible via JTAG */
2450 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2451 {
2452 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2453 return xscale_write_dcsr(arch_info->target, -1, -1);
2454 }
2455 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2456 {
2457 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2458 return xscale_write_rx(arch_info->target);
2459 }
2460 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2461 {
2462 /* can't write to TX register (debug-handler -> host) */
2463 return ERROR_OK;
2464 }
2465 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2466 {
2467 /* can't (explicitly) write to TXRXCTRL register */
2468 return ERROR_OK;
2469 }
2470 else /* Other DBG registers have to be transfered by the debug handler */
2471 {
2472 /* send CP write request (command 0x41) */
2473 xscale_send_u32(target, 0x41);
2474
2475 /* send CP register number */
2476 xscale_send_u32(target, arch_info->dbg_handler_number);
2477
2478 /* send CP register value */
2479 xscale_send_u32(target, value);
2480 buf_set_u32(reg->value, 0, 32, value);
2481 }
2482
2483 return ERROR_OK;
2484 }
2485
2486 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2487 {
2488 struct xscale_common *xscale = target_to_xscale(target);
2489 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2490 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2491
2492 /* send CP write request (command 0x41) */
2493 xscale_send_u32(target, 0x41);
2494
2495 /* send CP register number */
2496 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2497
2498 /* send CP register value */
2499 xscale_send_u32(target, value);
2500 buf_set_u32(dcsr->value, 0, 32, value);
2501
2502 return ERROR_OK;
2503 }
2504
2505 static int xscale_read_trace(struct target *target)
2506 {
2507 struct xscale_common *xscale = target_to_xscale(target);
2508 struct arm *armv4_5 = &xscale->armv4_5_common;
2509 struct xscale_trace_data **trace_data_p;
2510
2511 /* 258 words from debug handler
2512 * 256 trace buffer entries
2513 * 2 checkpoint addresses
2514 */
2515 uint32_t trace_buffer[258];
2516 int is_address[256];
2517 int i, j;
2518 unsigned int num_checkpoints = 0;
2519
2520 if (target->state != TARGET_HALTED)
2521 {
2522 LOG_WARNING("target must be stopped to read trace data");
2523 return ERROR_TARGET_NOT_HALTED;
2524 }
2525
2526 /* send read trace buffer command (command 0x61) */
2527 xscale_send_u32(target, 0x61);
2528
2529 /* receive trace buffer content */
2530 xscale_receive(target, trace_buffer, 258);
2531
2532 /* parse buffer backwards to identify address entries */
2533 for (i = 255; i >= 0; i--)
2534 {
2535 /* also count number of checkpointed entries */
2536 if ((trace_buffer[i] & 0xe0) == 0xc0)
2537 num_checkpoints++;
2538
2539 is_address[i] = 0;
2540 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2541 ((trace_buffer[i] & 0xf0) == 0xd0))
2542 {
2543 if (i > 0)
2544 is_address[--i] = 1;
2545 if (i > 0)
2546 is_address[--i] = 1;
2547 if (i > 0)
2548 is_address[--i] = 1;
2549 if (i > 0)
2550 is_address[--i] = 1;
2551 }
2552 }
2553
2554
2555 /* search first non-zero entry that is not part of an address */
2556 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2557 ;
2558
2559 if (j == 256)
2560 {
2561 LOG_DEBUG("no trace data collected");
2562 return ERROR_XSCALE_NO_TRACE_DATA;
2563 }
2564
2565 /* account for possible partial address at buffer start (wrap mode only) */
2566 if (is_address[0])
2567 { /* first entry is address; complete set of 4? */
2568 i = 1;
2569 while (i < 4)
2570 if (!is_address[i++])
2571 break;
2572 if (i < 4)
2573 j += i; /* partial address; can't use it */
2574 }
2575
2576 /* if first valid entry is indirect branch, can't use that either (no address) */
2577 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2578 j++;
2579
2580 /* walk linked list to terminating entry */
2581 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2582 ;
2583
2584 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2585 (*trace_data_p)->next = NULL;
2586 (*trace_data_p)->chkpt0 = trace_buffer[256];
2587 (*trace_data_p)->chkpt1 = trace_buffer[257];
2588 (*trace_data_p)->last_instruction =
2589 buf_get_u32(armv4_5->pc->value, 0, 32);
2590 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2591 (*trace_data_p)->depth = 256 - j;
2592 (*trace_data_p)->num_checkpoints = num_checkpoints;
2593
2594 for (i = j; i < 256; i++)
2595 {
2596 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2597 if (is_address[i])
2598 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2599 else
2600 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2601 }
2602
2603 return ERROR_OK;
2604 }
2605
2606 static int xscale_read_instruction(struct target *target, uint32_t pc,
2607 struct arm_instruction *instruction)
2608 {
2609 struct xscale_common *const xscale = target_to_xscale(target);
2610 int i;
2611 int section = -1;
2612 size_t size_read;
2613 uint32_t opcode;
2614 int retval;
2615
2616 if (!xscale->trace.image)
2617 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2618
2619 /* search for the section the current instruction belongs to */
2620 for (i = 0; i < xscale->trace.image->num_sections; i++)
2621 {
2622 if ((xscale->trace.image->sections[i].base_address <= pc) &&
2623 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > pc))
2624 {
2625 section = i;
2626 break;
2627 }
2628 }
2629
2630 if (section == -1)
2631 {
2632 /* current instruction couldn't be found in the image */
2633 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2634 }
2635
2636 if (xscale->trace.core_state == ARM_STATE_ARM)
2637 {
2638 uint8_t buf[4];
2639 if ((retval = image_read_section(xscale->trace.image, section,
2640 pc - xscale->trace.image->sections[section].base_address,
2641 4, buf, &size_read)) != ERROR_OK)
2642 {
2643 LOG_ERROR("error while reading instruction: %i", retval);
2644 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2645 }
2646 opcode = target_buffer_get_u32(target, buf);
2647 arm_evaluate_opcode(opcode, pc, instruction);
2648 }
2649 else if (xscale->trace.core_state == ARM_STATE_THUMB)
2650 {
2651 uint8_t buf[2];
2652 if ((retval = image_read_section(xscale->trace.image, section,
2653 pc - xscale->trace.image->sections[section].base_address,
2654 2, buf, &size_read)) != ERROR_OK)
2655 {
2656 LOG_ERROR("error while reading instruction: %i", retval);
2657 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2658 }
2659 opcode = target_buffer_get_u16(target, buf);
2660 thumb_evaluate_opcode(opcode, pc, instruction);
2661 }
2662 else
2663 {
2664 LOG_ERROR("BUG: unknown core state encountered");
2665 exit(-1);
2666 }
2667
2668 return ERROR_OK;
2669 }
2670
2671 /* Extract address encoded into trace data.
2672 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2673 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2674 int i, uint32_t *target)
2675 {
2676 /* if there are less than four entries prior to the indirect branch message
2677 * we can't extract the address */
2678 if (i < 4)
2679 *target = 0;
2680 else
2681 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2682 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2683 }
2684
2685 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2686 struct arm_instruction *instruction,
2687 struct command_context *cmd_ctx)
2688 {
2689 int retval = xscale_read_instruction(target, pc, instruction);
2690 if (retval == ERROR_OK)
2691 command_print(cmd_ctx, "%s", instruction->text);
2692 else
2693 command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2694 }
2695
2696 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2697 {
2698 struct xscale_common *xscale = target_to_xscale(target);
2699 struct xscale_trace_data *trace_data = xscale->trace.data;
2700 int i, retval;
2701 uint32_t breakpoint_pc;
2702 struct arm_instruction instruction;
2703 uint32_t current_pc = 0; /* initialized when address determined */
2704
2705 if (!xscale->trace.image)
2706 LOG_WARNING("No trace image loaded; use 'xscale trace_image'");
2707
2708 /* loop for each trace buffer that was loaded from target */
2709 while (trace_data)
2710 {
2711 int chkpt = 0; /* incremented as checkpointed entries found */
2712 int j;
2713
2714 /* FIXME: set this to correct mode when trace buffer is first enabled */
2715 xscale->trace.core_state = ARM_STATE_ARM;
2716
2717 /* loop for each entry in this trace buffer */
2718 for (i = 0; i < trace_data->depth; i++)
2719 {
2720 int exception = 0;
2721 uint32_t chkpt_reg = 0x0;
2722 uint32_t branch_target = 0;
2723 int count;
2724
2725 /* trace entry type is upper nybble of 'message byte' */
2726 int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;
2727
2728 /* Target addresses of indirect branches are written into buffer
2729 * before the message byte representing the branch. Skip past it */
2730 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2731 continue;
2732
2733 switch (trace_msg_type)
2734 {
2735 case 0: /* Exceptions */
2736 case 1:
2737 case 2:
2738 case 3:
2739 case 4:
2740 case 5:
2741 case 6:
2742 case 7:
2743 exception = (trace_data->entries[i].data & 0x70) >> 4;
2744
2745 /* FIXME: vector table may be at ffff0000 */
2746 branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
2747 break;
2748
2749 case 8: /* Direct Branch */
2750 break;
2751
2752 case 9: /* Indirect Branch */
2753 xscale_branch_address(trace_data, i, &branch_target);
2754 break;
2755
2756 case 13: /* Checkpointed Indirect Branch */
2757 xscale_branch_address(trace_data, i, &branch_target);
2758 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2759 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2760 else
2761 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2762
2763 chkpt++;
2764 break;
2765
2766 case 12: /* Checkpointed Direct Branch */
2767 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2768 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2769 else
2770 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2771
2772 /* if no current_pc, checkpoint will be starting point */
2773 if (current_pc == 0)
2774 branch_target = chkpt_reg;
2775
2776 chkpt++;
2777 break;
2778
2779 case 15: /* Roll-over */
2780 break;
2781
2782 default: /* Reserved */
2783 LOG_WARNING("trace is suspect: invalid trace message byte");
2784 continue;
2785
2786 }
2787
2788 /* If we don't have the current_pc yet, but we did get the branch target
2789 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
2790 * then we can start displaying instructions at the next iteration, with
2791 * branch_target as the starting point.
2792 */
2793 if (current_pc == 0)
2794 {
2795 current_pc = branch_target; /* remains 0 unless branch_target obtained */
2796 continue;
2797 }
2798
2799 /* We have current_pc. Read and display the instructions from the image.
2800 * First, display count instructions (lower nybble of message byte). */
2801 count = trace_data->entries[i].data & 0x0f;
2802 for (j = 0; j < count; j++)
2803 {
2804 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2805 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2806 }
2807
2808 /* An additional instruction is implicitly added to count for
2809 * rollover and some exceptions: undef, swi, prefetch abort. */
2810 if ((trace_msg_type == 15) || (exception > 0 && exception < 4))
2811 {
2812 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2813 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2814 }
2815
2816 if (trace_msg_type == 15) /* rollover */
2817 continue;
2818
2819 if (exception)
2820 {
2821 command_print(cmd_ctx, "--- exception %i ---", exception);
2822 continue;
2823 }
2824
2825 /* not exception or rollover; next instruction is a branch and is
2826 * not included in the count */
2827 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2828
2829 /* for direct branches, extract branch destination from instruction */
2830 if ((trace_msg_type == 8) || (trace_msg_type == 12))
2831 {
2832 retval = xscale_read_instruction(target, current_pc, &instruction);
2833 if (retval == ERROR_OK)
2834 current_pc = instruction.info.b_bl_bx_blx.target_address;
2835 else
2836 current_pc = 0; /* branch destination unknown */
2837
2838 /* direct branch w/ checkpoint; can also get from checkpoint reg */
2839 if (trace_msg_type == 12)
2840 {
2841 if (current_pc == 0)
2842 current_pc = chkpt_reg;
2843 else if (current_pc != chkpt_reg) /* sanity check */
2844 LOG_WARNING("trace is suspect: checkpoint register "
2845 "inconsistent with adddress from image");
2846 }
2847
2848 if (current_pc == 0)
2849 command_print(cmd_ctx, "address unknown");
2850
2851 continue;
2852 }
2853
2854 /* indirect branch; the branch destination was read from trace buffer */
2855 if ((trace_msg_type == 9) || (trace_msg_type == 13))
2856 {
2857 current_pc = branch_target;
2858
2859 /* sanity check (checkpoint reg is redundant) */
2860 if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
2861 LOG_WARNING("trace is suspect: checkpoint register "
2862 "inconsistent with address from trace buffer");
2863 }
2864
2865 } /* END: for (i = 0; i < trace_data->depth; i++) */
2866
2867 breakpoint_pc = trace_data->last_instruction; /* used below */
2868 trace_data = trace_data->next;
2869
2870 } /* END: while (trace_data) */
2871
2872 /* Finally... display all instructions up to the value of the pc when the
2873 * debug break occurred (saved when trace data was collected from target).
2874 * This is necessary because the trace only records execution branches and 16
2875 * consecutive instructions (rollovers), so last few typically missed.
2876 */
2877 if (current_pc == 0)
2878 return ERROR_OK; /* current_pc was never found */
2879
2880 /* how many instructions remaining? */
2881 int gap_count = (breakpoint_pc - current_pc) /
2882 (xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);
2883
2884 /* should never be negative or over 16, but verify */
2885 if (gap_count < 0 || gap_count > 16)
2886 {
2887 LOG_WARNING("trace is suspect: excessive gap at end of trace");
2888 return ERROR_OK; /* bail; large number or negative value no good */
2889 }
2890
2891 /* display remaining instructions */
2892 for (i = 0; i < gap_count; i++)
2893 {
2894 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2895 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2896 }
2897
2898 return ERROR_OK;
2899 }
2900
/* Accessor vtable installed on every entry of the XScale register cache;
 * reads and writes are routed through xscale_get_reg/xscale_set_reg. */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2905
2906 static void xscale_build_reg_cache(struct target *target)
2907 {
2908 struct xscale_common *xscale = target_to_xscale(target);
2909 struct arm *armv4_5 = &xscale->armv4_5_common;
2910 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2911 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2912 int i;
2913 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2914
2915 (*cache_p) = arm_build_reg_cache(target, armv4_5);
2916
2917 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2918 cache_p = &(*cache_p)->next;
2919
2920 /* fill in values for the xscale reg cache */
2921 (*cache_p)->name = "XScale registers";
2922 (*cache_p)->next = NULL;
2923 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2924 (*cache_p)->num_regs = num_regs;
2925
2926 for (i = 0; i < num_regs; i++)
2927 {
2928 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2929 (*cache_p)->reg_list[i].value = calloc(4, 1);
2930 (*cache_p)->reg_list[i].dirty = 0;
2931 (*cache_p)->reg_list[i].valid = 0;
2932 (*cache_p)->reg_list[i].size = 32;
2933 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2934 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2935 arch_info[i] = xscale_reg_arch_info[i];
2936 arch_info[i].target = target;
2937 }
2938
2939 xscale->reg_cache = (*cache_p);
2940 }
2941
/* Target-type init hook: called once per target after creation.
 * The only per-target setup needed here is the register cache. */
static int xscale_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
2948
/* One-time initialization of the XScale arch state: fix up the TAP IR
 * length from the variant name, precompute the exception-vector branch
 * opcodes that redirect into the debug handler, and set the default
 * debug/trace configuration.  Must run before the target is examined. */
static int xscale_init_arch_info(struct target *target,
		struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	struct arm *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specific data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* a variant param ... */
	if (variant) {
		int ir_length = 0;

		/* known cores group into three IR lengths: 5, 7, or 11 bits */
		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else if (strcmp(variant, "pxa3xx") == 0)
			ir_length = 11;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		/* override a wrong config-file IR length */
		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* PXA3xx shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler
	 * (B-instruction offsets are word counts relative to vector + 8) */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* non-reset vectors default to a branch-to-self (offset 0xfffffe) */
	for (i = 1; i <= 7; i++)
	{
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two instruction breakpoint comparators (IBCR0/1) ... */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	/* ... and two data breakpoint comparators (DBR0/1) */
	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
			target_name(target));

	/* opcodes planted for software breakpoints */
	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	/* trace collection starts disabled */
	xscale->trace.capture_status = TRACE_IDLE;
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.buffer_enabled = 0;
	xscale->trace.buffer_fill = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	arm_init_arch_info(target, armv4_5);

	/* MMU/cache accessors; cache geometry (ctype) probed later */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
3064
3065 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3066 {
3067 struct xscale_common *xscale;
3068
3069 if (sizeof xscale_debug_handler - 1 > 0x800) {
3070 LOG_ERROR("debug_handler.bin: larger than 2kb");
3071 return ERROR_FAIL;
3072 }
3073
3074 xscale = calloc(1, sizeof(*xscale));
3075 if (!xscale)
3076 return ERROR_FAIL;
3077
3078 return xscale_init_arch_info(target, xscale, target->tap,
3079 target->variant);
3080 }
3081
3082 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3083 {
3084 struct target *target = NULL;
3085 struct xscale_common *xscale;
3086 int retval;
3087 uint32_t handler_address;
3088
3089 if (CMD_ARGC < 2)
3090 {
3091 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3092 return ERROR_OK;
3093 }
3094
3095 if ((target = get_target(CMD_ARGV[0])) == NULL)
3096 {
3097 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3098 return ERROR_FAIL;
3099 }
3100
3101 xscale = target_to_xscale(target);
3102 retval = xscale_verify_pointer(CMD_CTX, xscale);
3103 if (retval != ERROR_OK)
3104 return retval;
3105
3106 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3107
3108 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3109 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3110 {
3111 xscale->handler_address = handler_address;
3112 }
3113 else
3114 {
3115 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3116 return ERROR_FAIL;
3117 }
3118
3119 return ERROR_OK;
3120 }
3121
3122 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3123 {
3124 struct target *target = NULL;
3125 struct xscale_common *xscale;
3126 int retval;
3127 uint32_t cache_clean_address;
3128
3129 if (CMD_ARGC < 2)
3130 {
3131 return ERROR_COMMAND_SYNTAX_ERROR;
3132 }
3133
3134 target = get_target(CMD_ARGV[0]);
3135 if (target == NULL)
3136 {
3137 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3138 return ERROR_FAIL;
3139 }
3140 xscale = target_to_xscale(target);
3141 retval = xscale_verify_pointer(CMD_CTX, xscale);
3142 if (retval != ERROR_OK)
3143 return retval;
3144
3145 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3146
3147 if (cache_clean_address & 0xffff)
3148 {
3149 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3150 }
3151 else
3152 {
3153 xscale->cache_clean_address = cache_clean_address;
3154 }
3155
3156 return ERROR_OK;
3157 }
3158
3159 COMMAND_HANDLER(xscale_handle_cache_info_command)
3160 {
3161 struct target *target = get_current_target(CMD_CTX);
3162 struct xscale_common *xscale = target_to_xscale(target);
3163 int retval;
3164
3165 retval = xscale_verify_pointer(CMD_CTX, xscale);
3166 if (retval != ERROR_OK)
3167 return retval;
3168
3169 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3170 }
3171
/* Translate a virtual address to a physical address by walking the
 * target's translation tables via armv4_5_mmu_translate_va. */
static int xscale_virt2phys(struct target *target,
		uint32_t virtual, uint32_t *physical)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int type;
	uint32_t cb;
	int domain;
	uint32_t ap;

	if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
		LOG_ERROR(xscale_not);
		return ERROR_TARGET_INVALID;
	}

	/* NOTE(review): armv4_5_mmu_translate_va appears to overload its
	 * return value — an error code when *type is set to -1, otherwise
	 * the translated physical address; confirm against its definition */
	uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
	if (type == -1)
	{
		return ret;
	}
	*physical = ret;
	return ERROR_OK;
}
3194
3195 static int xscale_mmu(struct target *target, int *enabled)
3196 {
3197 struct xscale_common *xscale = target_to_xscale(target);
3198
3199 if (target->state != TARGET_HALTED)
3200 {
3201 LOG_ERROR("Target not halted");
3202 return ERROR_TARGET_INVALID;
3203 }
3204 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3205 return ERROR_OK;
3206 }
3207
3208 COMMAND_HANDLER(xscale_handle_mmu_command)
3209 {
3210 struct target *target = get_current_target(CMD_CTX);
3211 struct xscale_common *xscale = target_to_xscale(target);
3212 int retval;
3213
3214 retval = xscale_verify_pointer(CMD_CTX, xscale);
3215 if (retval != ERROR_OK)
3216 return retval;
3217
3218 if (target->state != TARGET_HALTED)
3219 {
3220 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3221 return ERROR_OK;
3222 }
3223
3224 if (CMD_ARGC >= 1)
3225 {
3226 bool enable;
3227 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3228 if (enable)
3229 xscale_enable_mmu_caches(target, 1, 0, 0);
3230 else
3231 xscale_disable_mmu_caches(target, 1, 0, 0);
3232 xscale->armv4_5_mmu.mmu_enabled = enable;
3233 }
3234
3235 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3236
3237 return ERROR_OK;
3238 }
3239
3240 COMMAND_HANDLER(xscale_handle_idcache_command)
3241 {
3242 struct target *target = get_current_target(CMD_CTX);
3243 struct xscale_common *xscale = target_to_xscale(target);
3244
3245 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3246 if (retval != ERROR_OK)
3247 return retval;
3248
3249 if (target->state != TARGET_HALTED)
3250 {
3251 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3252 return ERROR_OK;
3253 }
3254
3255 bool icache = false;
3256 if (strcmp(CMD_NAME, "icache") == 0)
3257 icache = true;
3258 if (CMD_ARGC >= 1)
3259 {
3260 bool enable;
3261 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3262 if (icache) {
3263 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3264 if (enable)
3265 xscale_enable_mmu_caches(target, 0, 0, 1);
3266 else
3267 xscale_disable_mmu_caches(target, 0, 0, 1);
3268 } else {
3269 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3270 if (enable)
3271 xscale_enable_mmu_caches(target, 0, 1, 0);
3272 else
3273 xscale_disable_mmu_caches(target, 0, 1, 0);
3274 }
3275 }
3276
3277 bool enabled = icache ?
3278 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3279 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3280 const char *msg = enabled ? "enabled" : "disabled";
3281 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3282
3283 return ERROR_OK;
3284 }
3285
3286 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3287 {
3288 struct target *target = get_current_target(CMD_CTX);
3289 struct xscale_common *xscale = target_to_xscale(target);
3290 int retval;
3291
3292 retval = xscale_verify_pointer(CMD_CTX, xscale);
3293 if (retval != ERROR_OK)
3294 return retval;
3295
3296 if (CMD_ARGC < 1)
3297 {
3298 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3299 }
3300 else
3301 {
3302 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3303 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3304 xscale_write_dcsr(target, -1, -1);
3305 }
3306
3307 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3308
3309 return ERROR_OK;
3310 }
3311
3312
3313 COMMAND_HANDLER(xscale_handle_vector_table_command)
3314 {
3315 struct target *target = get_current_target(CMD_CTX);
3316 struct xscale_common *xscale = target_to_xscale(target);
3317 int err = 0;
3318 int retval;
3319
3320 retval = xscale_verify_pointer(CMD_CTX, xscale);
3321 if (retval != ERROR_OK)
3322 return retval;
3323
3324 if (CMD_ARGC == 0) /* print current settings */
3325 {
3326 int idx;
3327
3328 command_print(CMD_CTX, "active user-set static vectors:");
3329 for (idx = 1; idx < 8; idx++)
3330 if (xscale->static_low_vectors_set & (1 << idx))
3331 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3332 for (idx = 1; idx < 8; idx++)
3333 if (xscale->static_high_vectors_set & (1 << idx))
3334 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3335 return ERROR_OK;
3336 }
3337
3338 if (CMD_ARGC != 3)
3339 err = 1;
3340 else
3341 {
3342 int idx;
3343 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3344 uint32_t vec;
3345 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3346
3347 if (idx < 1 || idx >= 8)
3348 err = 1;
3349
3350 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3351 {
3352 xscale->static_low_vectors_set |= (1<<idx);
3353 xscale->static_low_vectors[idx] = vec;
3354 }
3355 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3356 {
3357 xscale->static_high_vectors_set |= (1<<idx);
3358 xscale->static_high_vectors[idx] = vec;
3359 }
3360 else
3361 err = 1;
3362 }
3363
3364 if (err)
3365 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3366
3367 return ERROR_OK;
3368 }
3369
3370
/* "xscale trace_buffer ['enable'|'disable' ['fill' count|'wrap']]":
 * configure the on-chip trace buffer.  Enabling discards previously
 * collected trace data.  buffer_fill > 0 selects fill-once mode (stop
 * after that many buffer fills); buffer_fill == -1 selects wrap mode.
 * Finally the DCSR trace-mode bits are updated to match. */
COMMAND_HANDLER(xscale_handle_trace_buffer_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t dcsr_value;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
	{
		struct xscale_trace_data *td, *next_td;
		xscale->trace.buffer_enabled = 1;

		/* free old trace data */
		td = xscale->trace.data;
		while (td)
		{
			next_td = td->next;

			if (td->entries)
				free(td->entries);
			free(td);
			td = next_td;
		}
		xscale->trace.data = NULL;
	}
	else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
	{
		xscale->trace.buffer_enabled = 0;
	}

	if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
	{
		/* "fill" with no count defaults to a single buffer fill */
		uint32_t fill = 1;
		if (CMD_ARGC >= 3)
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
		xscale->trace.buffer_fill = fill;
	}
	else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
	{
		/* wrap mode is encoded as a negative fill count */
		xscale->trace.buffer_fill = -1;
	}

	/* NOTE(review): the status print treats buffer_fill == 0 as "wrap",
	 * but the DCSR update below treats it as fill mode — confirm whether
	 * an explicit "fill 0" is meant to be allowed. */
	command_print(CMD_CTX, "trace buffer %s (%s)",
		(xscale->trace.buffer_enabled) ? "enabled" : "disabled",
		(xscale->trace.buffer_fill > 0) ? "fill" : "wrap");

	/* update DCSR trace-mode bits [1:0]: bit 1 set selects fill mode */
	dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
	if (xscale->trace.buffer_fill >= 0)
		xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
	else
		xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);

	return ERROR_OK;
}
3435
3436 COMMAND_HANDLER(xscale_handle_trace_image_command)
3437 {
3438 struct target *target = get_current_target(CMD_CTX);
3439 struct xscale_common *xscale = target_to_xscale(target);
3440 int retval;
3441
3442 if (CMD_ARGC < 1)
3443 {
3444 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3445 return ERROR_OK;
3446 }
3447
3448 retval = xscale_verify_pointer(CMD_CTX, xscale);
3449 if (retval != ERROR_OK)
3450 return retval;
3451
3452 if (xscale->trace.image)
3453 {
3454 image_close(xscale->trace.image);
3455 free(xscale->trace.image);
3456 command_print(CMD_CTX, "previously loaded image found and closed");
3457 }
3458
3459 xscale->trace.image = malloc(sizeof(struct image));
3460 xscale->trace.image->base_address_set = 0;
3461 xscale->trace.image->start_address_set = 0;
3462
3463 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3464 if (CMD_ARGC >= 2)
3465 {
3466 xscale->trace.image->base_address_set = 1;
3467 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3468 }
3469 else
3470 {
3471 xscale->trace.image->base_address_set = 0;
3472 }
3473
3474 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3475 {
3476 free(xscale->trace.image);
3477 xscale->trace.image = NULL;
3478 return ERROR_OK;
3479 }
3480
3481 return ERROR_OK;
3482 }
3483
3484 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3485 {
3486 struct target *target = get_current_target(CMD_CTX);
3487 struct xscale_common *xscale = target_to_xscale(target);
3488 struct xscale_trace_data *trace_data;
3489 struct fileio file;
3490 int retval;
3491
3492 retval = xscale_verify_pointer(CMD_CTX, xscale);
3493 if (retval != ERROR_OK)
3494 return retval;
3495
3496 if (target->state != TARGET_HALTED)
3497 {
3498 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3499 return ERROR_OK;
3500 }
3501
3502 if (CMD_ARGC < 1)
3503 {
3504 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3505 return ERROR_OK;
3506 }
3507
3508 trace_data = xscale->trace.data;
3509
3510 if (!trace_data)
3511 {
3512 command_print(CMD_CTX, "no trace data collected");
3513 return ERROR_OK;
3514 }
3515
3516 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3517 {
3518 return ERROR_OK;
3519 }
3520
3521 while (trace_data)
3522 {
3523 int i;
3524
3525 fileio_write_u32(&file, trace_data->chkpt0);
3526 fileio_write_u32(&file, trace_data->chkpt1);
3527 fileio_write_u32(&file, trace_data->last_instruction);
3528 fileio_write_u32(&file, trace_data->depth);
3529
3530 for (i = 0; i < trace_data->depth; i++)
3531 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3532
3533 trace_data = trace_data->next;
3534 }
3535
3536 fileio_close(&file);
3537
3538 return ERROR_OK;
3539 }
3540
3541 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3542 {
3543 struct target *target = get_current_target(CMD_CTX);
3544 struct xscale_common *xscale = target_to_xscale(target);
3545 int retval;
3546
3547 retval = xscale_verify_pointer(CMD_CTX, xscale);
3548 if (retval != ERROR_OK)
3549 return retval;
3550
3551 xscale_analyze_trace(target, CMD_CTX);
3552
3553 return ERROR_OK;
3554 }
3555
3556 COMMAND_HANDLER(xscale_handle_cp15)
3557 {
3558 struct target *target = get_current_target(CMD_CTX);
3559 struct xscale_common *xscale = target_to_xscale(target);
3560 int retval;
3561
3562 retval = xscale_verify_pointer(CMD_CTX, xscale);
3563 if (retval != ERROR_OK)
3564 return retval;
3565
3566 if (target->state != TARGET_HALTED)
3567 {
3568 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3569 return ERROR_OK;
3570 }
3571 uint32_t reg_no = 0;
3572 struct reg *reg = NULL;
3573 if (CMD_ARGC > 0)
3574 {
3575 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3576 /*translate from xscale cp15 register no to openocd register*/
3577 switch (reg_no)
3578 {
3579 case 0:
3580 reg_no = XSCALE_MAINID;
3581 break;
3582 case 1:
3583 reg_no = XSCALE_CTRL;
3584 break;
3585 case 2:
3586 reg_no = XSCALE_TTB;
3587 break;
3588 case 3:
3589 reg_no = XSCALE_DAC;
3590 break;
3591 case 5:
3592 reg_no = XSCALE_FSR;
3593 break;
3594 case 6:
3595 reg_no = XSCALE_FAR;
3596 break;
3597 case 13:
3598 reg_no = XSCALE_PID;
3599 break;
3600 case 15:
3601 reg_no = XSCALE_CPACCESS;
3602 break;
3603 default:
3604 command_print(CMD_CTX, "invalid register number");
3605 return ERROR_INVALID_ARGUMENTS;
3606 }
3607 reg = &xscale->reg_cache->reg_list[reg_no];
3608
3609 }
3610 if (CMD_ARGC == 1)
3611 {
3612 uint32_t value;
3613
3614 /* read cp15 control register */
3615 xscale_get_reg(reg);
3616 value = buf_get_u32(reg->value, 0, 32);
3617 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3618 }
3619 else if (CMD_ARGC == 2)
3620 {
3621 uint32_t value;
3622 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3623
3624 /* send CP write request (command 0x41) */
3625 xscale_send_u32(target, 0x41);
3626
3627 /* send CP register number */
3628 xscale_send_u32(target, reg_no);
3629
3630 /* send CP register value */
3631 xscale_send_u32(target, value);
3632
3633 /* execute cpwait to ensure outstanding operations complete */
3634 xscale_send_u32(target, 0x53);
3635 }
3636 else
3637 {
3638 command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
3639 }
3640
3641 return ERROR_OK;
3642 }
3643
/* Commands available while a target is selected (EXEC mode); registered
 * below as the "xscale" command group. */
static const struct command_registration xscale_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = xscale_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about CPU caches",
	},
	{
		.name = "mmu",
		.handler = xscale_handle_mmu_command,
		.mode = COMMAND_EXEC,
		.help = "enable or disable the MMU",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "icache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display ICache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		/* same handler as "icache"; it dispatches on the command name */
		.name = "dcache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display DCache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "vector_catch",
		.handler = xscale_handle_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "set or display 8-bit mask of vectors "
			"that should trigger debug entry",
		.usage = "[mask]",
	},
	{
		.name = "vector_table",
		.handler = xscale_handle_vector_table_command,
		.mode = COMMAND_EXEC,
		.help = "set vector table entry in mini-ICache, "
			"or display current tables",
		.usage = "[('high'|'low') index code]",
	},
	{
		.name = "trace_buffer",
		.handler = xscale_handle_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "display trace buffer status, enable or disable "
			"tracing, and optionally reconfigure trace mode",
		.usage = "['enable'|'disable' ['fill' number|'wrap']]",
	},
	{
		.name = "dump_trace",
		.handler = xscale_handle_dump_trace_command,
		.mode = COMMAND_EXEC,
		.help = "dump content of trace buffer to file",
		.usage = "filename",
	},
	{
		.name = "analyze_trace",
		.handler = xscale_handle_analyze_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "analyze content of trace buffer",
		.usage = "",
	},
	{
		.name = "trace_image",
		.handler = xscale_handle_trace_image_command,
		.mode = COMMAND_EXEC,
		.help = "load image from file to address (default 0)",
		.usage = "filename [offset [filetype]]",
	},
	{
		.name = "cp15",
		.handler = xscale_handle_cp15,
		.mode = COMMAND_EXEC,
		.help = "Read or write coprocessor 15 register.",
		.usage = "register [value]",
	},
	COMMAND_REGISTRATION_DONE
};
/* Commands usable at any time (including configuration stage), plus the
 * chained EXEC-mode command table above. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "target address",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "address",
	},
	{
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: generic ARM commands plus the
 * "xscale" command group defined above. */
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3760
/* OpenOCD target driver vtable for Intel XScale cores. */
struct target_type xscale_target =
{
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	/* no soft-reset-halt support */
	.soft_reset_halt = NULL,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,
	.bulk_write_memory = xscale_bulk_write_memory,

	/* generic ARM implementations via the debug handler */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)