xscale: fix trace buffer functionality when resuming from a breakpoint
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
41
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
74
75
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
78 *
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
83 */
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
86
87 static char *const xscale_reg_list[] =
88 {
89 "XSCALE_MAINID", /* 0 */
90 "XSCALE_CACHETYPE",
91 "XSCALE_CTRL",
92 "XSCALE_AUXCTRL",
93 "XSCALE_TTB",
94 "XSCALE_DAC",
95 "XSCALE_FSR",
96 "XSCALE_FAR",
97 "XSCALE_PID",
98 "XSCALE_CPACCESS",
99 "XSCALE_IBCR0", /* 10 */
100 "XSCALE_IBCR1",
101 "XSCALE_DBR0",
102 "XSCALE_DBR1",
103 "XSCALE_DBCON",
104 "XSCALE_TBREG",
105 "XSCALE_CHKPT0",
106 "XSCALE_CHKPT1",
107 "XSCALE_DCSR",
108 "XSCALE_TX",
109 "XSCALE_RX", /* 20 */
110 "XSCALE_TXRXCTRL",
111 };
112
113 static const struct xscale_reg xscale_reg_arch_info[] =
114 {
115 {XSCALE_MAINID, NULL},
116 {XSCALE_CACHETYPE, NULL},
117 {XSCALE_CTRL, NULL},
118 {XSCALE_AUXCTRL, NULL},
119 {XSCALE_TTB, NULL},
120 {XSCALE_DAC, NULL},
121 {XSCALE_FSR, NULL},
122 {XSCALE_FAR, NULL},
123 {XSCALE_PID, NULL},
124 {XSCALE_CPACCESS, NULL},
125 {XSCALE_IBCR0, NULL},
126 {XSCALE_IBCR1, NULL},
127 {XSCALE_DBR0, NULL},
128 {XSCALE_DBR1, NULL},
129 {XSCALE_DBCON, NULL},
130 {XSCALE_TBREG, NULL},
131 {XSCALE_CHKPT0, NULL},
132 {XSCALE_CHKPT1, NULL},
133 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
134 {-1, NULL}, /* TX accessed via JTAG */
135 {-1, NULL}, /* RX accessed via JTAG */
136 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
137 };
138
139 /* convenience wrapper to access XScale specific registers */
140 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
141 {
142 uint8_t buf[4];
143
144 buf_set_u32(buf, 0, 32, value);
145
146 return xscale_set_reg(reg, buf);
147 }
148
149 static const char xscale_not[] = "target is not an XScale";
150
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
153 {
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
157 }
158 return ERROR_OK;
159 }
160
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
162 {
163 if (tap == NULL)
164 return ERROR_FAIL;
165
166 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
167 {
168 struct scan_field field;
169 uint8_t scratch[4];
170
171 memset(&field, 0, sizeof field);
172 field.num_bits = tap->ir_length;
173 field.out_value = scratch;
174 buf_set_u32(scratch, 0, field.num_bits, new_instr);
175
176 jtag_add_ir_scan(tap, &field, end_state);
177 }
178
179 return ERROR_OK;
180 }
181
/* Read the Debug Control and Status Register over JTAG and refresh the
 * cached XSCALE_DCSR value.  This is a two-pass operation: the first DR
 * scan captures the current DCSR contents, the second writes that same
 * value back (the SELDCSR scan is write-through), also carrying the
 * hold_rst / external_debug_break bits in the 3-bit control field.
 *
 * Returns ERROR_OK, or a JTAG error code from jtag_execute_queue().
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant,
			TAP_DRPAUSE);

	/* control field: bit1 = hold_rst, bit2 = external debug break request */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	/* capture the 32-bit DCSR directly into the register cache buffer */
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	/* the cached DCSR now matches the hardware */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
243
244
245 static void xscale_getbuf(jtag_callback_data_t arg)
246 {
247 uint8_t *in = (uint8_t *)arg;
248 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
249 }
250
251 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
252 {
253 if (num_words == 0)
254 return ERROR_INVALID_ARGUMENTS;
255
256 struct xscale_common *xscale = target_to_xscale(target);
257 int retval = ERROR_OK;
258 tap_state_t path[3];
259 struct scan_field fields[3];
260 uint8_t *field0 = malloc(num_words * 1);
261 uint8_t field0_check_value = 0x2;
262 uint8_t field0_check_mask = 0x6;
263 uint32_t *field1 = malloc(num_words * 4);
264 uint8_t field2_check_value = 0x0;
265 uint8_t field2_check_mask = 0x1;
266 int words_done = 0;
267 int words_scheduled = 0;
268 int i;
269
270 path[0] = TAP_DRSELECT;
271 path[1] = TAP_DRCAPTURE;
272 path[2] = TAP_DRSHIFT;
273
274 memset(&fields, 0, sizeof fields);
275
276 fields[0].num_bits = 3;
277 fields[0].check_value = &field0_check_value;
278 fields[0].check_mask = &field0_check_mask;
279
280 fields[1].num_bits = 32;
281
282 fields[2].num_bits = 1;
283 fields[2].check_value = &field2_check_value;
284 fields[2].check_mask = &field2_check_mask;
285
286 xscale_jtag_set_instr(target->tap,
287 XSCALE_DBGTX << xscale->xscale_variant,
288 TAP_IDLE);
289 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
290
291 /* repeat until all words have been collected */
292 int attempts = 0;
293 while (words_done < num_words)
294 {
295 /* schedule reads */
296 words_scheduled = 0;
297 for (i = words_done; i < num_words; i++)
298 {
299 fields[0].in_value = &field0[i];
300
301 jtag_add_pathmove(3, path);
302
303 fields[1].in_value = (uint8_t *)(field1 + i);
304
305 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
306
307 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
308
309 words_scheduled++;
310 }
311
312 if ((retval = jtag_execute_queue()) != ERROR_OK)
313 {
314 LOG_ERROR("JTAG error while receiving data from debug handler");
315 break;
316 }
317
318 /* examine results */
319 for (i = words_done; i < num_words; i++)
320 {
321 if (!(field0[0] & 1))
322 {
323 /* move backwards if necessary */
324 int j;
325 for (j = i; j < num_words - 1; j++)
326 {
327 field0[j] = field0[j + 1];
328 field1[j] = field1[j + 1];
329 }
330 words_scheduled--;
331 }
332 }
333 if (words_scheduled == 0)
334 {
335 if (attempts++==1000)
336 {
337 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
338 retval = ERROR_TARGET_TIMEOUT;
339 break;
340 }
341 }
342
343 words_done += words_scheduled;
344 }
345
346 for (i = 0; i < num_words; i++)
347 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
348
349 free(field1);
350
351 return retval;
352 }
353
354 static int xscale_read_tx(struct target *target, int consume)
355 {
356 struct xscale_common *xscale = target_to_xscale(target);
357 tap_state_t path[3];
358 tap_state_t noconsume_path[6];
359 int retval;
360 struct timeval timeout, now;
361 struct scan_field fields[3];
362 uint8_t field0_in = 0x0;
363 uint8_t field0_check_value = 0x2;
364 uint8_t field0_check_mask = 0x6;
365 uint8_t field2_check_value = 0x0;
366 uint8_t field2_check_mask = 0x1;
367
368 xscale_jtag_set_instr(target->tap,
369 XSCALE_DBGTX << xscale->xscale_variant,
370 TAP_IDLE);
371
372 path[0] = TAP_DRSELECT;
373 path[1] = TAP_DRCAPTURE;
374 path[2] = TAP_DRSHIFT;
375
376 noconsume_path[0] = TAP_DRSELECT;
377 noconsume_path[1] = TAP_DRCAPTURE;
378 noconsume_path[2] = TAP_DREXIT1;
379 noconsume_path[3] = TAP_DRPAUSE;
380 noconsume_path[4] = TAP_DREXIT2;
381 noconsume_path[5] = TAP_DRSHIFT;
382
383 memset(&fields, 0, sizeof fields);
384
385 fields[0].num_bits = 3;
386 fields[0].in_value = &field0_in;
387
388 fields[1].num_bits = 32;
389 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
390
391 fields[2].num_bits = 1;
392 uint8_t tmp;
393 fields[2].in_value = &tmp;
394
395 gettimeofday(&timeout, NULL);
396 timeval_add_time(&timeout, 1, 0);
397
398 for (;;)
399 {
400 /* if we want to consume the register content (i.e. clear TX_READY),
401 * we have to go straight from Capture-DR to Shift-DR
402 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
403 */
404 if (consume)
405 jtag_add_pathmove(3, path);
406 else
407 {
408 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
409 }
410
411 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
412
413 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
414 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
415
416 if ((retval = jtag_execute_queue()) != ERROR_OK)
417 {
418 LOG_ERROR("JTAG error while reading TX");
419 return ERROR_TARGET_TIMEOUT;
420 }
421
422 gettimeofday(&now, NULL);
423 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
424 {
425 LOG_ERROR("time out reading TX register");
426 return ERROR_TARGET_TIMEOUT;
427 }
428 if (!((!(field0_in & 1)) && consume))
429 {
430 goto done;
431 }
432 if (debug_level >= 3)
433 {
434 LOG_DEBUG("waiting 100ms");
435 alive_sleep(100); /* avoid flooding the logs */
436 } else
437 {
438 keep_alive();
439 }
440 }
441 done:
442
443 if (!(field0_in & 1))
444 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
445
446 return ERROR_OK;
447 }
448
/* Push the cached XSCALE_RX value to the debug handler's RX register.
 * Polls (for up to one second) until the handler has consumed the
 * previous RX word (rx_read low), then scans the new value with the
 * rx_valid flag set.
 *
 * Returns ERROR_OK, ERROR_TARGET_TIMEOUT if the handler never drains
 * RX, or a JTAG error code from jtag_execute_queue().
 */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant,
			TAP_IDLE);

	memset(&fields, 0, sizeof fields);

	/* field 0: 3-bit status, captured bit0 = rx_read (handler busy) */
	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	/* field 1: the 32-bit payload from the cached RX register */
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	/* field 2: rx_valid flag; 0 while polling, 1 for the final write */
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
530
531 /* send count elements of size byte to the debug handler */
532 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
533 {
534 struct xscale_common *xscale = target_to_xscale(target);
535 uint32_t t[3];
536 int bits[3];
537 int retval;
538 int done_count = 0;
539
540 xscale_jtag_set_instr(target->tap,
541 XSCALE_DBGRX << xscale->xscale_variant,
542 TAP_IDLE);
543
544 bits[0]=3;
545 t[0]=0;
546 bits[1]=32;
547 t[2]=1;
548 bits[2]=1;
549 int endianness = target->endianness;
550 while (done_count++ < count)
551 {
552 switch (size)
553 {
554 case 4:
555 if (endianness == TARGET_LITTLE_ENDIAN)
556 {
557 t[1]=le_to_h_u32(buffer);
558 } else
559 {
560 t[1]=be_to_h_u32(buffer);
561 }
562 break;
563 case 2:
564 if (endianness == TARGET_LITTLE_ENDIAN)
565 {
566 t[1]=le_to_h_u16(buffer);
567 } else
568 {
569 t[1]=be_to_h_u16(buffer);
570 }
571 break;
572 case 1:
573 t[1]=buffer[0];
574 break;
575 default:
576 LOG_ERROR("BUG: size neither 4, 2 nor 1");
577 return ERROR_INVALID_ARGUMENTS;
578 }
579 jtag_add_dr_out(target->tap,
580 3,
581 bits,
582 t,
583 TAP_IDLE);
584 buffer += size;
585 }
586
587 if ((retval = jtag_execute_queue()) != ERROR_OK)
588 {
589 LOG_ERROR("JTAG error while sending data to debug handler");
590 return retval;
591 }
592
593 return ERROR_OK;
594 }
595
596 static int xscale_send_u32(struct target *target, uint32_t value)
597 {
598 struct xscale_common *xscale = target_to_xscale(target);
599
600 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
601 return xscale_write_rx(target);
602 }
603
/* Write the cached XSCALE_DCSR value to the Debug Control and Status
 * Register, optionally updating the hold_rst / external_debug_break
 * control bits first (pass -1 to leave either unchanged).
 *
 * Returns ERROR_OK, or a JTAG error code from jtag_execute_queue().
 */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	/* -1 means "keep the currently latched setting" */
	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant,
			TAP_IDLE);

	/* control field: bit1 = hold_rst, bit2 = external debug break request */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* hardware and cache now agree */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
660
/* Parity of a 32-bit word: 0 if the number of set bits is even, 1 if odd.
 * Folds the word down to a single bit by successive XOR halvings. */
static unsigned int parity(unsigned int v)
{
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v ^= v >> 2;
	v ^= v >> 1;
	return v & 1;
}
672
/* Load one 32-byte cache line (8 instruction words from buffer[]) into
 * the mini instruction cache at virtual address va, via the LDIC JTAG
 * function.  The line is sent as a 6-bit command plus 27 address bits,
 * followed by eight 33-bit packets (instruction word + parity bit).
 *
 * Returns the result of jtag_execute_queue().
 */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	xscale_jtag_set_instr(target->tap,
			XSCALE_LDIC << xscale->xscale_variant,
			TAP_IDLE);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	/* rest of packet is a cacheline: 8 instructions, with parity */
	/* note: the two fields are repurposed here — packet now carries the
	 * 32-bit instruction word and cmd carries its single parity bit */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	}

	return jtag_execute_queue();
}
727
728 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
729 {
730 struct xscale_common *xscale = target_to_xscale(target);
731 uint8_t packet[4];
732 uint8_t cmd;
733 struct scan_field fields[2];
734
735 xscale_jtag_set_instr(target->tap,
736 XSCALE_LDIC << xscale->xscale_variant,
737 TAP_IDLE);
738
739 /* CMD for invalidate IC line b000, bits [6:4] b000 */
740 buf_set_u32(&cmd, 0, 6, 0x0);
741
742 /* virtual address of desired cache line */
743 buf_set_u32(packet, 0, 27, va >> 5);
744
745 memset(&fields, 0, sizeof fields);
746
747 fields[0].num_bits = 6;
748 fields[0].out_value = &cmd;
749
750 fields[1].num_bits = 27;
751 fields[1].out_value = packet;
752
753 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
754
755 return ERROR_OK;
756 }
757
758 static int xscale_update_vectors(struct target *target)
759 {
760 struct xscale_common *xscale = target_to_xscale(target);
761 int i;
762 int retval;
763
764 uint32_t low_reset_branch, high_reset_branch;
765
766 for (i = 1; i < 8; i++)
767 {
768 /* if there's a static vector specified for this exception, override */
769 if (xscale->static_high_vectors_set & (1 << i))
770 {
771 xscale->high_vectors[i] = xscale->static_high_vectors[i];
772 }
773 else
774 {
775 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
776 if (retval == ERROR_TARGET_TIMEOUT)
777 return retval;
778 if (retval != ERROR_OK)
779 {
780 /* Some of these reads will fail as part of normal execution */
781 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
782 }
783 }
784 }
785
786 for (i = 1; i < 8; i++)
787 {
788 if (xscale->static_low_vectors_set & (1 << i))
789 {
790 xscale->low_vectors[i] = xscale->static_low_vectors[i];
791 }
792 else
793 {
794 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
795 if (retval == ERROR_TARGET_TIMEOUT)
796 return retval;
797 if (retval != ERROR_OK)
798 {
799 /* Some of these reads will fail as part of normal execution */
800 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
801 }
802 }
803 }
804
805 /* calculate branches to debug handler */
806 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
807 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
808
809 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
810 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
811
812 /* invalidate and load exception vectors in mini i-cache */
813 xscale_invalidate_ic_line(target, 0x0);
814 xscale_invalidate_ic_line(target, 0xffff0000);
815
816 xscale_load_ic(target, 0x0, xscale->low_vectors);
817 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
818
819 return ERROR_OK;
820 }
821
822 static int xscale_arch_state(struct target *target)
823 {
824 struct xscale_common *xscale = target_to_xscale(target);
825 struct arm *armv4_5 = &xscale->armv4_5_common;
826
827 static const char *state[] =
828 {
829 "disabled", "enabled"
830 };
831
832 static const char *arch_dbg_reason[] =
833 {
834 "", "\n(processor reset)", "\n(trace buffer full)"
835 };
836
837 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
838 {
839 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
840 return ERROR_INVALID_ARGUMENTS;
841 }
842
843 arm_arch_state(target);
844 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
845 state[xscale->armv4_5_mmu.mmu_enabled],
846 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
847 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
848 arch_dbg_reason[xscale->arch_debug_reason]);
849
850 return ERROR_OK;
851 }
852
853 static int xscale_poll(struct target *target)
854 {
855 int retval = ERROR_OK;
856
857 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
858 {
859 enum target_state previous_state = target->state;
860 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
861 {
862
863 /* there's data to read from the tx register, we entered debug state */
864 target->state = TARGET_HALTED;
865
866 /* process debug entry, fetching current mode regs */
867 retval = xscale_debug_entry(target);
868 }
869 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
870 {
871 LOG_USER("error while polling TX register, reset CPU");
872 /* here we "lie" so GDB won't get stuck and a reset can be perfomed */
873 target->state = TARGET_HALTED;
874 }
875
876 /* debug_entry could have overwritten target state (i.e. immediate resume)
877 * don't signal event handlers in that case
878 */
879 if (target->state != TARGET_HALTED)
880 return ERROR_OK;
881
882 /* if target was running, signal that we halted
883 * otherwise we reentered from debug execution */
884 if (previous_state == TARGET_RUNNING)
885 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
886 else
887 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
888 }
889
890 return retval;
891 }
892
/* Process entry into debug state: drain the register dump the debug
 * handler streams over TX (r0, pc, r1-r7, cpsr, then the banked set),
 * populate the ARM register cache, decode the DCSR Method-of-Entry
 * field into a debug reason, apply the mode-dependent PC fixup, refresh
 * MMU/cache status, and collect trace data if tracing is active.
 *
 * Returns ERROR_OK, ERROR_TARGET_FAILURE if the received CPSR carries
 * an invalid mode (garbled communication), or a JTAG error code.
 */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t pc;
	uint32_t buffer[10];
	int i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->pc->value, 0, 32, buffer[1]);
	armv4_5->pc->dirty = 1;
	armv4_5->pc->valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(armv4_5, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* an invalid mode in the received CPSR means the dump was garbled */
	if (!is_arm_mode(armv4_5->core_mode))
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
			arm_mode_name(armv4_5->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (armv4_5->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
		armv4_5->spsr->dirty = false;
		armv4_5->spsr->valid = true;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++)
	{
		struct reg *r = arm_reg_current(armv4_5, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* examine debug reason: Method Of Entry is DCSR bits [4:2] */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	switch (moe)
	{
		case 0x0: /* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1: /* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2: /* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3: /* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4: /* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5: /* Vector trap occurred */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6: /* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7: /* Reserved (may flag Hot-Debug support) */
		default:
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(armv4_5->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.buffer_enabled)
	{
		xscale_read_trace(target);
		xscale->trace.buffer_fill--;

		/* resume if we're still collecting trace data */
		if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
			&& (xscale->trace.buffer_fill > 0))
		{
			xscale_resume(target, 1, 0x0, 1, 0);
		}
		else
		{
			xscale->trace.buffer_enabled = 0;
		}
	}

	return ERROR_OK;
}
1062
1063 static int xscale_halt(struct target *target)
1064 {
1065 struct xscale_common *xscale = target_to_xscale(target);
1066
1067 LOG_DEBUG("target->state: %s",
1068 target_state_name(target));
1069
1070 if (target->state == TARGET_HALTED)
1071 {
1072 LOG_DEBUG("target was already halted");
1073 return ERROR_OK;
1074 }
1075 else if (target->state == TARGET_UNKNOWN)
1076 {
1077 /* this must not happen for a xscale target */
1078 LOG_ERROR("target was in unknown state when halt was requested");
1079 return ERROR_TARGET_INVALID;
1080 }
1081 else if (target->state == TARGET_RESET)
1082 {
1083 LOG_DEBUG("target->state == TARGET_RESET");
1084 }
1085 else
1086 {
1087 /* assert external dbg break */
1088 xscale->external_debug_break = 1;
1089 xscale_read_dcsr(target);
1090
1091 target->debug_reason = DBG_REASON_DBGRQ;
1092 }
1093
1094 return ERROR_OK;
1095 }
1096
1097 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1098 {
1099 struct xscale_common *xscale = target_to_xscale(target);
1100 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1101 int retval;
1102
1103 if (xscale->ibcr0_used)
1104 {
1105 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1106
1107 if (ibcr0_bp)
1108 {
1109 xscale_unset_breakpoint(target, ibcr0_bp);
1110 }
1111 else
1112 {
1113 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1114 exit(-1);
1115 }
1116 }
1117
1118 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1119 return retval;
1120
1121 return ERROR_OK;
1122 }
1123
1124 static int xscale_disable_single_step(struct target *target)
1125 {
1126 struct xscale_common *xscale = target_to_xscale(target);
1127 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1128 int retval;
1129
1130 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1131 return retval;
1132
1133 return ERROR_OK;
1134 }
1135
1136 static void xscale_enable_watchpoints(struct target *target)
1137 {
1138 struct watchpoint *watchpoint = target->watchpoints;
1139
1140 while (watchpoint)
1141 {
1142 if (watchpoint->set == 0)
1143 xscale_set_watchpoint(target, watchpoint);
1144 watchpoint = watchpoint->next;
1145 }
1146 }
1147
1148 static void xscale_enable_breakpoints(struct target *target)
1149 {
1150 struct breakpoint *breakpoint = target->breakpoints;
1151
1152 /* set any pending breakpoints */
1153 while (breakpoint)
1154 {
1155 if (breakpoint->set == 0)
1156 xscale_set_breakpoint(target, breakpoint);
1157 breakpoint = breakpoint->next;
1158 }
1159 }
1160
1161 static int xscale_resume(struct target *target, int current,
1162 uint32_t address, int handle_breakpoints, int debug_execution)
1163 {
1164 struct xscale_common *xscale = target_to_xscale(target);
1165 struct arm *armv4_5 = &xscale->armv4_5_common;
1166 struct breakpoint *breakpoint = target->breakpoints;
1167 uint32_t current_pc;
1168 int retval;
1169 int i;
1170
1171 LOG_DEBUG("-");
1172
1173 if (target->state != TARGET_HALTED)
1174 {
1175 LOG_WARNING("target not halted");
1176 return ERROR_TARGET_NOT_HALTED;
1177 }
1178
1179 if (!debug_execution)
1180 {
1181 target_free_all_working_areas(target);
1182 }
1183
1184 /* update vector tables */
1185 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1186 return retval;
1187
1188 /* current = 1: continue on current pc, otherwise continue at <address> */
1189 if (!current)
1190 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1191
1192 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1193
1194 /* if we're at the reset vector, we have to simulate the branch */
1195 if (current_pc == 0x0)
1196 {
1197 arm_simulate_step(target, NULL);
1198 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1199 }
1200
1201 /* the front-end may request us not to handle breakpoints */
1202 if (handle_breakpoints)
1203 {
1204 breakpoint = breakpoint_find(target,
1205 buf_get_u32(armv4_5->pc->value, 0, 32));
1206 if (breakpoint != NULL)
1207 {
1208 uint32_t next_pc;
1209 int saved_trace_buffer_enabled;
1210
1211 /* there's a breakpoint at the current PC, we have to step over it */
1212 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1213 xscale_unset_breakpoint(target, breakpoint);
1214
1215 /* calculate PC of next instruction */
1216 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1217 {
1218 uint32_t current_opcode;
1219 target_read_u32(target, current_pc, &current_opcode);
1220 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1221 }
1222
1223 LOG_DEBUG("enable single-step");
1224 xscale_enable_single_step(target, next_pc);
1225
1226 /* restore banked registers */
1227 retval = xscale_restore_banked(target);
1228
1229 /* send resume request */
1230 xscale_send_u32(target, 0x30);
1231
1232 /* send CPSR */
1233 xscale_send_u32(target,
1234 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1235 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1236 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1237
1238 for (i = 7; i >= 0; i--)
1239 {
1240 /* send register */
1241 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1242 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1243 }
1244
1245 /* send PC */
1246 xscale_send_u32(target,
1247 buf_get_u32(armv4_5->pc->value, 0, 32));
1248 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
1249 buf_get_u32(armv4_5->pc->value, 0, 32));
1250
1251 /* disable trace data collection in xscale_debug_entry() */
1252 saved_trace_buffer_enabled = xscale->trace.buffer_enabled;
1253 xscale->trace.buffer_enabled = 0;
1254
1255 /* wait for and process debug entry */
1256 xscale_debug_entry(target);
1257
1258 /* re-enable trace buffer, if enabled previously */
1259 xscale->trace.buffer_enabled = saved_trace_buffer_enabled;
1260
1261 LOG_DEBUG("disable single-step");
1262 xscale_disable_single_step(target);
1263
1264 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1265 xscale_set_breakpoint(target, breakpoint);
1266 }
1267 }
1268
1269 /* enable any pending breakpoints and watchpoints */
1270 xscale_enable_breakpoints(target);
1271 xscale_enable_watchpoints(target);
1272
1273 /* restore banked registers */
1274 retval = xscale_restore_banked(target);
1275
1276 /* send resume request (command 0x30 or 0x31)
1277 * clean the trace buffer if it is to be enabled (0x62) */
1278 if (xscale->trace.buffer_enabled)
1279 {
1280 /* if trace buffer is set to 'fill' mode, save starting pc */
1281 if (xscale->trace.buffer_fill > 0)
1282 {
1283 xscale->trace.pc_ok = 1;
1284 xscale->trace.current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1285 }
1286 xscale_send_u32(target, 0x62);
1287 xscale_send_u32(target, 0x31);
1288 }
1289 else
1290 xscale_send_u32(target, 0x30);
1291
1292 /* send CPSR */
1293 xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
1294 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1295 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1296
1297 for (i = 7; i >= 0; i--)
1298 {
1299 /* send register */
1300 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1301 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1302 }
1303
1304 /* send PC */
1305 xscale_send_u32(target, buf_get_u32(armv4_5->pc->value, 0, 32));
1306 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1307 buf_get_u32(armv4_5->pc->value, 0, 32));
1308
1309 target->debug_reason = DBG_REASON_NOTHALTED;
1310
1311 if (!debug_execution)
1312 {
1313 /* registers are now invalid */
1314 register_cache_invalidate(armv4_5->core_cache);
1315 target->state = TARGET_RUNNING;
1316 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1317 }
1318 else
1319 {
1320 target->state = TARGET_DEBUG_RUNNING;
1321 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1322 }
1323
1324 LOG_DEBUG("target resumed");
1325
1326 return ERROR_OK;
1327 }
1328
/* Core of single-stepping. Assumes any breakpoint at the current PC has
 * already been removed by the caller (see xscale_step). Computes the next
 * PC by simulation, arms IBCR0 as a one-shot break there, resumes the
 * target via the debug handler protocol (resume command, CPSR, r7..r0,
 * PC), waits for the resulting debug entry, then disarms the step break.
 * The exact command/word order below is dictated by the debug handler
 * protocol — do not reorder.
 */
static int xscale_step_inner(struct target *target, int current,
		uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
	{
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
		return retval;

	/* restore banked registers */
	if ((retval = xscale_restore_banked(target)) != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
			return retval;
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
			return retval;
	}
	else
	if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
		return retval;

	/* send CPSR */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* send r7..r0 — descending order is part of the protocol */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->pc->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->pc->value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	/* wait for and process debug entry */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1413
1414 static int xscale_step(struct target *target, int current,
1415 uint32_t address, int handle_breakpoints)
1416 {
1417 struct arm *armv4_5 = target_to_arm(target);
1418 struct breakpoint *breakpoint = NULL;
1419
1420 uint32_t current_pc;
1421 int retval;
1422
1423 if (target->state != TARGET_HALTED)
1424 {
1425 LOG_WARNING("target not halted");
1426 return ERROR_TARGET_NOT_HALTED;
1427 }
1428
1429 /* current = 1: continue on current pc, otherwise continue at <address> */
1430 if (!current)
1431 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1432
1433 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1434
1435 /* if we're at the reset vector, we have to simulate the step */
1436 if (current_pc == 0x0)
1437 {
1438 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1439 return retval;
1440 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1441
1442 target->debug_reason = DBG_REASON_SINGLESTEP;
1443 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1444
1445 return ERROR_OK;
1446 }
1447
1448 /* the front-end may request us not to handle breakpoints */
1449 if (handle_breakpoints)
1450 breakpoint = breakpoint_find(target,
1451 buf_get_u32(armv4_5->pc->value, 0, 32));
1452 if (breakpoint != NULL) {
1453 retval = xscale_unset_breakpoint(target, breakpoint);
1454 if (retval != ERROR_OK)
1455 return retval;
1456 }
1457
1458 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1459
1460 if (breakpoint)
1461 {
1462 xscale_set_breakpoint(target, breakpoint);
1463 }
1464
1465 LOG_DEBUG("target stepped");
1466
1467 return ERROR_OK;
1468
1469 }
1470
/* Assert SRST on the target while forcing it to trap into halt-mode
 * debug on the way out of reset. DCSR is programmed *before* reset is
 * asserted so "Hold reset" and "Trap Reset" are already in effect; the
 * JTAG sequence ordering below is deliberate and must not be changed.
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		  target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	/* if reset-halt was requested, halt now so deassert finds us halted */
	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1512
/* Deassert reset: release SRST, then reload the on-chip debug handler
 * into the mini-icache (32-byte cache lines) together with the low/high
 * exception vectors, and finally let the core run so it traps into the
 * handler. All hardware breakpoint/watchpoint bookkeeping is cleared
 * because the comparator registers were reset. The sleep/runtest timing
 * below was determined empirically — do not shorten it.
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* reset wiped the comparators: both IBCRs and both DBRs are free again */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	register_cache_invalidate(xscale->armv4_5_common.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, TAP_IDLE);
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt)
		{
			uint32_t cache_line[8];
			unsigned i;

			/* one mini-icache line = 32 bytes = 8 words */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad the final partial line; 0xe1a08008 looks like
			 * "mov r8, r8" (a nop) — TODO confirm intent */
			for (; i < 32; i += 4)
			{
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		};

		/* the vector tables go in separately at 0x0 and 0xffff0000 */
		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, TAP_IDLE);

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1646
/* Stub: reading a single core register through the debug handler is not
 * implemented. NOTE(review): it returns ERROR_OK, so callers cannot
 * detect that nothing was read — confirm whether ERROR_FAIL would be
 * safer before changing it. */
static int xscale_read_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1654
/* Stub: writing a single core register through the debug handler is not
 * implemented. NOTE(review): returns ERROR_OK despite doing nothing —
 * see xscale_read_core_reg. */
static int xscale_write_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode, uint32_t value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1662
1663 static int xscale_full_context(struct target *target)
1664 {
1665 struct arm *armv4_5 = target_to_arm(target);
1666
1667 uint32_t *buffer;
1668
1669 int i, j;
1670
1671 LOG_DEBUG("-");
1672
1673 if (target->state != TARGET_HALTED)
1674 {
1675 LOG_WARNING("target not halted");
1676 return ERROR_TARGET_NOT_HALTED;
1677 }
1678
1679 buffer = malloc(4 * 8);
1680
1681 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1682 * we can't enter User mode on an XScale (unpredictable),
1683 * but User shares registers with SYS
1684 */
1685 for (i = 1; i < 7; i++)
1686 {
1687 enum arm_mode mode = armv4_5_number_to_mode(i);
1688 bool valid = true;
1689 struct reg *r;
1690
1691 if (mode == ARM_MODE_USR)
1692 continue;
1693
1694 /* check if there are invalid registers in the current mode
1695 */
1696 for (j = 0; valid && j <= 16; j++)
1697 {
1698 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1699 mode, j).valid)
1700 valid = false;
1701 }
1702 if (valid)
1703 continue;
1704
1705 /* request banked registers */
1706 xscale_send_u32(target, 0x0);
1707
1708 /* send CPSR for desired bank mode */
1709 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1710
1711 /* get banked registers: r8 to r14; and SPSR
1712 * except in USR/SYS mode
1713 */
1714 if (mode != ARM_MODE_SYS) {
1715 /* SPSR */
1716 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1717 mode, 16);
1718
1719 xscale_receive(target, buffer, 8);
1720
1721 buf_set_u32(r->value, 0, 32, buffer[7]);
1722 r->dirty = false;
1723 r->valid = true;
1724 } else {
1725 xscale_receive(target, buffer, 7);
1726 }
1727
1728 /* move data from buffer to register cache */
1729 for (j = 8; j <= 14; j++)
1730 {
1731 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1732 mode, j);
1733
1734 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1735 r->dirty = false;
1736 r->valid = true;
1737 }
1738 }
1739
1740 free(buffer);
1741
1742 return ERROR_OK;
1743 }
1744
/* Flush dirty banked registers (r8-r14 and SPSR per privileged mode)
 * back to the target before resuming. A mode is flushed wholesale as
 * soon as any of its registers is dirty; the goto skips straight to the
 * flush once one dirty register is found. Protocol per mode: command
 * 0x1, then the CPSR selecting the bank, then r8..r14 (+ SPSR except
 * for USR/SYS).
 */
static int xscale_restore_banked(struct target *target)
{
	struct arm *armv4_5 = target_to_arm(target);

	int i, j;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * and check if any banked registers need to be written. Ignore
	 * USR mode (number 0) in favor of SYS; we can't enter User mode on
	 * an XScale (unpredictable), but they share all registers.
	 */
	for (i = 1; i < 7; i++)
	{
		enum arm_mode mode = armv4_5_number_to_mode(i);
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are dirty registers in this mode */
		for (j = 8; j <= 14; j++)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j).dirty)
				goto dirty;
		}

		/* if not USR/SYS, check if the SPSR needs to be written */
		if (mode != ARM_MODE_SYS)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16).dirty)
				goto dirty;
		}

		/* there's nothing to flush for this mode */
		continue;

dirty:
		/* command 0x1: "send banked registers" */
		xscale_send_u32(target, 0x1);

		/* send CPSR for desired mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* send r8 to r14/lr ... only FIQ needs more than r13..r14,
		 * but this protocol doesn't understand that nuance.
		 */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}

		/* send spsr if not in USR/SYS mode */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}
	}

	return ERROR_OK;
}
1817
1818 static int xscale_read_memory(struct target *target, uint32_t address,
1819 uint32_t size, uint32_t count, uint8_t *buffer)
1820 {
1821 struct xscale_common *xscale = target_to_xscale(target);
1822 uint32_t *buf32;
1823 uint32_t i;
1824 int retval;
1825
1826 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1827
1828 if (target->state != TARGET_HALTED)
1829 {
1830 LOG_WARNING("target not halted");
1831 return ERROR_TARGET_NOT_HALTED;
1832 }
1833
1834 /* sanitize arguments */
1835 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1836 return ERROR_INVALID_ARGUMENTS;
1837
1838 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1839 return ERROR_TARGET_UNALIGNED_ACCESS;
1840
1841 /* send memory read request (command 0x1n, n: access size) */
1842 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1843 return retval;
1844
1845 /* send base address for read request */
1846 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1847 return retval;
1848
1849 /* send number of requested data words */
1850 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1851 return retval;
1852
1853 /* receive data from target (count times 32-bit words in host endianness) */
1854 buf32 = malloc(4 * count);
1855 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1856 return retval;
1857
1858 /* extract data from host-endian buffer into byte stream */
1859 for (i = 0; i < count; i++)
1860 {
1861 switch (size)
1862 {
1863 case 4:
1864 target_buffer_set_u32(target, buffer, buf32[i]);
1865 buffer += 4;
1866 break;
1867 case 2:
1868 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1869 buffer += 2;
1870 break;
1871 case 1:
1872 *buffer++ = buf32[i] & 0xff;
1873 break;
1874 default:
1875 LOG_ERROR("invalid read size");
1876 return ERROR_INVALID_ARGUMENTS;
1877 }
1878 }
1879
1880 free(buf32);
1881
1882 /* examine DCSR, to see if Sticky Abort (SA) got set */
1883 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1884 return retval;
1885 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1886 {
1887 /* clear SA bit */
1888 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1889 return retval;
1890
1891 return ERROR_TARGET_DATA_ABORT;
1892 }
1893
1894 return ERROR_OK;
1895 }
1896
1897 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1898 uint32_t size, uint32_t count, uint8_t *buffer)
1899 {
1900 struct xscale_common *xscale = target_to_xscale(target);
1901
1902 /* with MMU inactive, there are only physical addresses */
1903 if (!xscale->armv4_5_mmu.mmu_enabled)
1904 return xscale_read_memory(target, address, size, count, buffer);
1905
1906 /** \todo: provide a non-stub implementation of this routine. */
1907 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1908 target_name(target), __func__);
1909 return ERROR_FAIL;
1910 }
1911
1912 static int xscale_write_memory(struct target *target, uint32_t address,
1913 uint32_t size, uint32_t count, uint8_t *buffer)
1914 {
1915 struct xscale_common *xscale = target_to_xscale(target);
1916 int retval;
1917
1918 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1919
1920 if (target->state != TARGET_HALTED)
1921 {
1922 LOG_WARNING("target not halted");
1923 return ERROR_TARGET_NOT_HALTED;
1924 }
1925
1926 /* sanitize arguments */
1927 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1928 return ERROR_INVALID_ARGUMENTS;
1929
1930 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1931 return ERROR_TARGET_UNALIGNED_ACCESS;
1932
1933 /* send memory write request (command 0x2n, n: access size) */
1934 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1935 return retval;
1936
1937 /* send base address for read request */
1938 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1939 return retval;
1940
1941 /* send number of requested data words to be written*/
1942 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1943 return retval;
1944
1945 /* extract data from host-endian buffer into byte stream */
1946 #if 0
1947 for (i = 0; i < count; i++)
1948 {
1949 switch (size)
1950 {
1951 case 4:
1952 value = target_buffer_get_u32(target, buffer);
1953 xscale_send_u32(target, value);
1954 buffer += 4;
1955 break;
1956 case 2:
1957 value = target_buffer_get_u16(target, buffer);
1958 xscale_send_u32(target, value);
1959 buffer += 2;
1960 break;
1961 case 1:
1962 value = *buffer;
1963 xscale_send_u32(target, value);
1964 buffer += 1;
1965 break;
1966 default:
1967 LOG_ERROR("should never get here");
1968 exit(-1);
1969 }
1970 }
1971 #endif
1972 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1973 return retval;
1974
1975 /* examine DCSR, to see if Sticky Abort (SA) got set */
1976 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1977 return retval;
1978 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1979 {
1980 /* clear SA bit */
1981 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1982 return retval;
1983
1984 return ERROR_TARGET_DATA_ABORT;
1985 }
1986
1987 return ERROR_OK;
1988 }
1989
1990 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1991 uint32_t size, uint32_t count, uint8_t *buffer)
1992 {
1993 struct xscale_common *xscale = target_to_xscale(target);
1994
1995 /* with MMU inactive, there are only physical addresses */
1996 if (!xscale->armv4_5_mmu.mmu_enabled)
1997 return xscale_read_memory(target, address, size, count, buffer);
1998
1999 /** \todo: provide a non-stub implementation of this routine. */
2000 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
2001 target_name(target), __func__);
2002 return ERROR_FAIL;
2003 }
2004
/* Bulk write entry point: implemented as a plain word-wise (size 4)
 * memory write — this target has no faster bulk path here. */
static int xscale_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}
2010
2011 static uint32_t xscale_get_ttb(struct target *target)
2012 {
2013 struct xscale_common *xscale = target_to_xscale(target);
2014 uint32_t ttb;
2015
2016 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2017 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2018
2019 return ttb;
2020 }
2021
/* Disable the MMU and/or caches by clearing the corresponding cp15
 * control bits: M (bit 0), C (bit 2), I (bit 12). The data cache is
 * cleaned *before* it is invalidated so dirty lines are not lost —
 * keep that ordering. Ends with cpwait (0x53) so the cp15 write takes
 * effect before returning. */
static void xscale_disable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cp15_control;

	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);

	if (mmu)
		cp15_control &= ~0x1U;

	if (d_u_cache)
	{
		/* clean DCache */
		xscale_send_u32(target, 0x50);
		xscale_send_u32(target, xscale->cache_clean_address);

		/* invalidate DCache */
		xscale_send_u32(target, 0x51);

		cp15_control &= ~0x4U;
	}

	if (i_cache)
	{
		/* invalidate ICache */
		xscale_send_u32(target, 0x52);
		cp15_control &= ~0x1000U;
	}

	/* write new cp15 control register */
	xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);

	/* execute cpwait to ensure outstanding operations complete */
	xscale_send_u32(target, 0x53);
}
2060
2061 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2062 int d_u_cache, int i_cache)
2063 {
2064 struct xscale_common *xscale = target_to_xscale(target);
2065 uint32_t cp15_control;
2066
2067 /* read cp15 control register */
2068 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2069 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2070
2071 if (mmu)
2072 cp15_control |= 0x1U;
2073
2074 if (d_u_cache)
2075 cp15_control |= 0x4U;
2076
2077 if (i_cache)
2078 cp15_control |= 0x1000U;
2079
2080 /* write new cp15 control register */
2081 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2082
2083 /* execute cpwait to ensure outstanding operations complete */
2084 xscale_send_u32(target, 0x53);
2085 }
2086
/* Install a breakpoint on a halted target.
 * Hard breakpoints use one of the two IBCR comparators (set=1 for
 * IBCR0, set=2 for IBCR1; bit 0 of the value enables the comparator).
 * Soft breakpoints save the original instruction and patch in the
 * ARM or Thumb bkpt opcode, then clean/invalidate the caches so the
 * patched instruction is actually fetched.
 * NOTE(review): the "no hardware comparator" branch returns ERROR_OK;
 * it should be unreachable because xscale_add_breakpoint() reserves a
 * comparator — confirm before changing. */
static int xscale_set_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set)
	{
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		/* bit 0 enables the IBCR comparator */
		uint32_t value = breakpoint->address | 1;
		if (!xscale->ibcr0_used)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
			xscale->ibcr0_used = 1;
			breakpoint->set = 1;	/* breakpoint set on first breakpoint register */
		}
		else if (!xscale->ibcr1_used)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
			xscale->ibcr1_used = 1;
			breakpoint->set = 2;	/* breakpoint set on second breakpoint register */
		}
		else
		{
			LOG_ERROR("BUG: no hardware comparator available");
			return ERROR_OK;
		}
	}
	else if (breakpoint->type == BKPT_SOFT)
	{
		if (breakpoint->length == 4)
		{
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
			/* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
			if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
			{
				return retval;
			}
		}
		else
		{
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
			/* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
			if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
			{
				return retval;
			}
		}
		breakpoint->set = 1;

		/* make the patched opcode visible to instruction fetch */
		xscale_send_u32(target, 0x50);	/* clean dcache */
		xscale_send_u32(target, xscale->cache_clean_address);
		xscale_send_u32(target, 0x51);	/* invalidate dcache */
		xscale_send_u32(target, 0x52);	/* invalidate icache and flush fetch buffers */
	}

	return ERROR_OK;
}
2164
2165 static int xscale_add_breakpoint(struct target *target,
2166 struct breakpoint *breakpoint)
2167 {
2168 struct xscale_common *xscale = target_to_xscale(target);
2169
2170 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2171 {
2172 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2173 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2174 }
2175
2176 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2177 {
2178 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2179 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2180 }
2181
2182 if (breakpoint->type == BKPT_HARD)
2183 {
2184 xscale->ibcr_available--;
2185 }
2186
2187 return ERROR_OK;
2188 }
2189
2190 static int xscale_unset_breakpoint(struct target *target,
2191 struct breakpoint *breakpoint)
2192 {
2193 int retval;
2194 struct xscale_common *xscale = target_to_xscale(target);
2195
2196 if (target->state != TARGET_HALTED)
2197 {
2198 LOG_WARNING("target not halted");
2199 return ERROR_TARGET_NOT_HALTED;
2200 }
2201
2202 if (!breakpoint->set)
2203 {
2204 LOG_WARNING("breakpoint not set");
2205 return ERROR_OK;
2206 }
2207
2208 if (breakpoint->type == BKPT_HARD)
2209 {
2210 if (breakpoint->set == 1)
2211 {
2212 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2213 xscale->ibcr0_used = 0;
2214 }
2215 else if (breakpoint->set == 2)
2216 {
2217 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2218 xscale->ibcr1_used = 0;
2219 }
2220 breakpoint->set = 0;
2221 }
2222 else
2223 {
2224 /* restore original instruction (kept in target endianness) */
2225 if (breakpoint->length == 4)
2226 {
2227 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2228 {
2229 return retval;
2230 }
2231 }
2232 else
2233 {
2234 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2235 {
2236 return retval;
2237 }
2238 }
2239 breakpoint->set = 0;
2240
2241 xscale_send_u32(target, 0x50); /* clean dcache */
2242 xscale_send_u32(target, xscale->cache_clean_address);
2243 xscale_send_u32(target, 0x51); /* invalidate dcache */
2244 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2245 }
2246
2247 return ERROR_OK;
2248 }
2249
2250 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2251 {
2252 struct xscale_common *xscale = target_to_xscale(target);
2253
2254 if (target->state != TARGET_HALTED)
2255 {
2256 LOG_WARNING("target not halted");
2257 return ERROR_TARGET_NOT_HALTED;
2258 }
2259
2260 if (breakpoint->set)
2261 {
2262 xscale_unset_breakpoint(target, breakpoint);
2263 }
2264
2265 if (breakpoint->type == BKPT_HARD)
2266 xscale->ibcr_available++;
2267
2268 return ERROR_OK;
2269 }
2270
2271 static int xscale_set_watchpoint(struct target *target,
2272 struct watchpoint *watchpoint)
2273 {
2274 struct xscale_common *xscale = target_to_xscale(target);
2275 uint8_t enable = 0;
2276 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2277 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2278
2279 if (target->state != TARGET_HALTED)
2280 {
2281 LOG_WARNING("target not halted");
2282 return ERROR_TARGET_NOT_HALTED;
2283 }
2284
2285 xscale_get_reg(dbcon);
2286
2287 switch (watchpoint->rw)
2288 {
2289 case WPT_READ:
2290 enable = 0x3;
2291 break;
2292 case WPT_ACCESS:
2293 enable = 0x2;
2294 break;
2295 case WPT_WRITE:
2296 enable = 0x1;
2297 break;
2298 default:
2299 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2300 }
2301
2302 if (!xscale->dbr0_used)
2303 {
2304 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2305 dbcon_value |= enable;
2306 xscale_set_reg_u32(dbcon, dbcon_value);
2307 watchpoint->set = 1;
2308 xscale->dbr0_used = 1;
2309 }
2310 else if (!xscale->dbr1_used)
2311 {
2312 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2313 dbcon_value |= enable << 2;
2314 xscale_set_reg_u32(dbcon, dbcon_value);
2315 watchpoint->set = 2;
2316 xscale->dbr1_used = 1;
2317 }
2318 else
2319 {
2320 LOG_ERROR("BUG: no hardware comparator available");
2321 return ERROR_OK;
2322 }
2323
2324 return ERROR_OK;
2325 }
2326
2327 static int xscale_add_watchpoint(struct target *target,
2328 struct watchpoint *watchpoint)
2329 {
2330 struct xscale_common *xscale = target_to_xscale(target);
2331
2332 if (xscale->dbr_available < 1)
2333 {
2334 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2335 }
2336
2337 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2338 {
2339 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2340 }
2341
2342 xscale->dbr_available--;
2343
2344 return ERROR_OK;
2345 }
2346
2347 static int xscale_unset_watchpoint(struct target *target,
2348 struct watchpoint *watchpoint)
2349 {
2350 struct xscale_common *xscale = target_to_xscale(target);
2351 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2352 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2353
2354 if (target->state != TARGET_HALTED)
2355 {
2356 LOG_WARNING("target not halted");
2357 return ERROR_TARGET_NOT_HALTED;
2358 }
2359
2360 if (!watchpoint->set)
2361 {
2362 LOG_WARNING("breakpoint not set");
2363 return ERROR_OK;
2364 }
2365
2366 if (watchpoint->set == 1)
2367 {
2368 dbcon_value &= ~0x3;
2369 xscale_set_reg_u32(dbcon, dbcon_value);
2370 xscale->dbr0_used = 0;
2371 }
2372 else if (watchpoint->set == 2)
2373 {
2374 dbcon_value &= ~0xc;
2375 xscale_set_reg_u32(dbcon, dbcon_value);
2376 xscale->dbr1_used = 0;
2377 }
2378 watchpoint->set = 0;
2379
2380 return ERROR_OK;
2381 }
2382
2383 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2384 {
2385 struct xscale_common *xscale = target_to_xscale(target);
2386
2387 if (target->state != TARGET_HALTED)
2388 {
2389 LOG_WARNING("target not halted");
2390 return ERROR_TARGET_NOT_HALTED;
2391 }
2392
2393 if (watchpoint->set)
2394 {
2395 xscale_unset_watchpoint(target, watchpoint);
2396 }
2397
2398 xscale->dbr_available++;
2399
2400 return ERROR_OK;
2401 }
2402
2403 static int xscale_get_reg(struct reg *reg)
2404 {
2405 struct xscale_reg *arch_info = reg->arch_info;
2406 struct target *target = arch_info->target;
2407 struct xscale_common *xscale = target_to_xscale(target);
2408
2409 /* DCSR, TX and RX are accessible via JTAG */
2410 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2411 {
2412 return xscale_read_dcsr(arch_info->target);
2413 }
2414 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2415 {
2416 /* 1 = consume register content */
2417 return xscale_read_tx(arch_info->target, 1);
2418 }
2419 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2420 {
2421 /* can't read from RX register (host -> debug handler) */
2422 return ERROR_OK;
2423 }
2424 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2425 {
2426 /* can't (explicitly) read from TXRXCTRL register */
2427 return ERROR_OK;
2428 }
2429 else /* Other DBG registers have to be transfered by the debug handler */
2430 {
2431 /* send CP read request (command 0x40) */
2432 xscale_send_u32(target, 0x40);
2433
2434 /* send CP register number */
2435 xscale_send_u32(target, arch_info->dbg_handler_number);
2436
2437 /* read register value */
2438 xscale_read_tx(target, 1);
2439 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2440
2441 reg->dirty = 0;
2442 reg->valid = 1;
2443 }
2444
2445 return ERROR_OK;
2446 }
2447
2448 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2449 {
2450 struct xscale_reg *arch_info = reg->arch_info;
2451 struct target *target = arch_info->target;
2452 struct xscale_common *xscale = target_to_xscale(target);
2453 uint32_t value = buf_get_u32(buf, 0, 32);
2454
2455 /* DCSR, TX and RX are accessible via JTAG */
2456 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2457 {
2458 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2459 return xscale_write_dcsr(arch_info->target, -1, -1);
2460 }
2461 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2462 {
2463 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2464 return xscale_write_rx(arch_info->target);
2465 }
2466 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2467 {
2468 /* can't write to TX register (debug-handler -> host) */
2469 return ERROR_OK;
2470 }
2471 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2472 {
2473 /* can't (explicitly) write to TXRXCTRL register */
2474 return ERROR_OK;
2475 }
2476 else /* Other DBG registers have to be transfered by the debug handler */
2477 {
2478 /* send CP write request (command 0x41) */
2479 xscale_send_u32(target, 0x41);
2480
2481 /* send CP register number */
2482 xscale_send_u32(target, arch_info->dbg_handler_number);
2483
2484 /* send CP register value */
2485 xscale_send_u32(target, value);
2486 buf_set_u32(reg->value, 0, 32, value);
2487 }
2488
2489 return ERROR_OK;
2490 }
2491
2492 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2493 {
2494 struct xscale_common *xscale = target_to_xscale(target);
2495 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2496 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2497
2498 /* send CP write request (command 0x41) */
2499 xscale_send_u32(target, 0x41);
2500
2501 /* send CP register number */
2502 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2503
2504 /* send CP register value */
2505 xscale_send_u32(target, value);
2506 buf_set_u32(dcsr->value, 0, 32, value);
2507
2508 return ERROR_OK;
2509 }
2510
2511 static int xscale_read_trace(struct target *target)
2512 {
2513 struct xscale_common *xscale = target_to_xscale(target);
2514 struct arm *armv4_5 = &xscale->armv4_5_common;
2515 struct xscale_trace_data **trace_data_p;
2516
2517 /* 258 words from debug handler
2518 * 256 trace buffer entries
2519 * 2 checkpoint addresses
2520 */
2521 uint32_t trace_buffer[258];
2522 int is_address[256];
2523 int i, j;
2524
2525 if (target->state != TARGET_HALTED)
2526 {
2527 LOG_WARNING("target must be stopped to read trace data");
2528 return ERROR_TARGET_NOT_HALTED;
2529 }
2530
2531 /* send read trace buffer command (command 0x61) */
2532 xscale_send_u32(target, 0x61);
2533
2534 /* receive trace buffer content */
2535 xscale_receive(target, trace_buffer, 258);
2536
2537 /* parse buffer backwards to identify address entries */
2538 for (i = 255; i >= 0; i--)
2539 {
2540 is_address[i] = 0;
2541 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2542 ((trace_buffer[i] & 0xf0) == 0xd0))
2543 {
2544 if (i >= 3)
2545 is_address[--i] = 1;
2546 if (i >= 2)
2547 is_address[--i] = 1;
2548 if (i >= 1)
2549 is_address[--i] = 1;
2550 if (i >= 0)
2551 is_address[--i] = 1;
2552 }
2553 }
2554
2555
2556 /* search first non-zero entry */
2557 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2558 ;
2559
2560 if (j == 256)
2561 {
2562 LOG_DEBUG("no trace data collected");
2563 return ERROR_XSCALE_NO_TRACE_DATA;
2564 }
2565
2566 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2567 ;
2568
2569 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2570 (*trace_data_p)->next = NULL;
2571 (*trace_data_p)->chkpt0 = trace_buffer[256];
2572 (*trace_data_p)->chkpt1 = trace_buffer[257];
2573 (*trace_data_p)->last_instruction =
2574 buf_get_u32(armv4_5->pc->value, 0, 32);
2575 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2576 (*trace_data_p)->depth = 256 - j;
2577
2578 for (i = j; i < 256; i++)
2579 {
2580 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2581 if (is_address[i])
2582 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2583 else
2584 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2585 }
2586
2587 return ERROR_OK;
2588 }
2589
/* Fetch and decode the instruction at xscale->trace.current_pc from the
 * image loaded for trace analysis (past execution can't be read back
 * from the target itself).
 *
 * Returns ERROR_TRACE_IMAGE_UNAVAILABLE when no image is loaded, and
 * ERROR_TRACE_INSTRUCTION_UNAVAILABLE when the PC falls outside every
 * image section or the section read fails.
 */
static int xscale_read_instruction(struct target *target,
		struct arm_instruction *instruction)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int i;
	int section = -1;
	size_t size_read;
	uint32_t opcode;
	int retval;

	if (!xscale->trace.image)
		return ERROR_TRACE_IMAGE_UNAVAILABLE;

	/* search for the section the current instruction belongs to */
	for (i = 0; i < xscale->trace.image->num_sections; i++)
	{
		if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
			(xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
		{
			section = i;
			break;
		}
	}

	if (section == -1)
	{
		/* current instruction couldn't be found in the image */
		return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
	}

	if (xscale->trace.core_state == ARM_STATE_ARM)
	{
		/* ARM state: read and decode a 32-bit opcode */
		uint8_t buf[4];
		if ((retval = image_read_section(xscale->trace.image, section,
			xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
			4, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction: %i", retval);
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u32(target, buf);
		arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
	}
	else if (xscale->trace.core_state == ARM_STATE_THUMB)
	{
		/* Thumb state: read and decode a 16-bit opcode */
		uint8_t buf[2];
		if ((retval = image_read_section(xscale->trace.image, section,
			xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
			2, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction: %i", retval);
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u16(target, buf);
		thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
	}
	else
	{
		/* only ARM and Thumb state are traceable */
		LOG_ERROR("BUG: unknown core state encountered");
		exit(-1);
	}

	return ERROR_OK;
}
2654
2655 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2656 int i, uint32_t *target)
2657 {
2658 /* if there are less than four entries prior to the indirect branch message
2659 * we can't extract the address */
2660 if (i < 4)
2661 {
2662 return -1;
2663 }
2664
2665 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2666 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2667
2668 return 0;
2669 }
2670
2671 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2672 {
2673 struct xscale_common *xscale = target_to_xscale(target);
2674 int next_pc_ok = 0;
2675 uint32_t next_pc = 0x0;
2676 struct xscale_trace_data *trace_data = xscale->trace.data;
2677 int retval;
2678
2679 while (trace_data)
2680 {
2681 int i, chkpt;
2682 int rollover;
2683 int branch;
2684 int exception;
2685 xscale->trace.core_state = ARM_STATE_ARM;
2686
2687 chkpt = 0;
2688 rollover = 0;
2689
2690 for (i = 0; i < trace_data->depth; i++)
2691 {
2692 next_pc_ok = 0;
2693 branch = 0;
2694 exception = 0;
2695
2696 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2697 continue;
2698
2699 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2700 {
2701 case 0: /* Exceptions */
2702 case 1:
2703 case 2:
2704 case 3:
2705 case 4:
2706 case 5:
2707 case 6:
2708 case 7:
2709 exception = (trace_data->entries[i].data & 0x70) >> 4;
2710 next_pc_ok = 1;
2711 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2712 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2713 break;
2714 case 8: /* Direct Branch */
2715 branch = 1;
2716 break;
2717 case 9: /* Indirect Branch */
2718 branch = 1;
2719 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2720 {
2721 next_pc_ok = 1;
2722 }
2723 break;
2724 case 13: /* Checkpointed Indirect Branch */
2725 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2726 {
2727 next_pc_ok = 1;
2728 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2729 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2730 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2731 }
2732 /* explicit fall-through */
2733 case 12: /* Checkpointed Direct Branch */
2734 branch = 1;
2735 if (chkpt == 0)
2736 {
2737 next_pc_ok = 1;
2738 next_pc = trace_data->chkpt0;
2739 chkpt++;
2740 }
2741 else if (chkpt == 1)
2742 {
2743 next_pc_ok = 1;
2744 next_pc = trace_data->chkpt0;
2745 chkpt++;
2746 }
2747 else
2748 {
2749 LOG_WARNING("more than two checkpointed branches encountered");
2750 }
2751 break;
2752 case 15: /* Roll-over */
2753 rollover++;
2754 continue;
2755 default: /* Reserved */
2756 command_print(cmd_ctx, "--- reserved trace message ---");
2757 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2758 return ERROR_OK;
2759 }
2760
2761 if (xscale->trace.pc_ok)
2762 {
2763 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2764 struct arm_instruction instruction;
2765
2766 if ((exception == 6) || (exception == 7))
2767 {
2768 /* IRQ or FIQ exception, no instruction executed */
2769 executed -= 1;
2770 }
2771
2772 while (executed-- >= 0)
2773 {
2774 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2775 {
2776 /* can't continue tracing with no image available */
2777 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2778 {
2779 return retval;
2780 }
2781 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2782 {
2783 /* TODO: handle incomplete images */
2784 }
2785 }
2786
2787 /* a precise abort on a load to the PC is included in the incremental
2788 * word count, other instructions causing data aborts are not included
2789 */
2790 if ((executed == 0) && (exception == 4)
2791 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2792 {
2793 if ((instruction.type == ARM_LDM)
2794 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2795 {
2796 executed--;
2797 }
2798 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2799 && (instruction.info.load_store.Rd != 15))
2800 {
2801 executed--;
2802 }
2803 }
2804
2805 /* only the last instruction executed
2806 * (the one that caused the control flow change)
2807 * could be a taken branch
2808 */
2809 if (((executed == -1) && (branch == 1)) &&
2810 (((instruction.type == ARM_B) ||
2811 (instruction.type == ARM_BL) ||
2812 (instruction.type == ARM_BLX)) &&
2813 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2814 {
2815 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2816 }
2817 else
2818 {
2819 xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2;
2820 }
2821 command_print(cmd_ctx, "%s", instruction.text);
2822 }
2823
2824 rollover = 0;
2825 }
2826
2827 if (next_pc_ok)
2828 {
2829 xscale->trace.current_pc = next_pc;
2830 xscale->trace.pc_ok = 1;
2831 }
2832 }
2833
2834 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2)
2835 {
2836 struct arm_instruction instruction;
2837 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2838 {
2839 /* can't continue tracing with no image available */
2840 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2841 {
2842 return retval;
2843 }
2844 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2845 {
2846 /* TODO: handle incomplete images */
2847 }
2848 }
2849 command_print(cmd_ctx, "%s", instruction.text);
2850 }
2851
2852 trace_data = trace_data->next;
2853 }
2854
2855 return ERROR_OK;
2856 }
2857
/* register-cache accessor callbacks for the XScale debug registers */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2862
2863 static void xscale_build_reg_cache(struct target *target)
2864 {
2865 struct xscale_common *xscale = target_to_xscale(target);
2866 struct arm *armv4_5 = &xscale->armv4_5_common;
2867 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2868 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2869 int i;
2870 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2871
2872 (*cache_p) = arm_build_reg_cache(target, armv4_5);
2873
2874 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2875 cache_p = &(*cache_p)->next;
2876
2877 /* fill in values for the xscale reg cache */
2878 (*cache_p)->name = "XScale registers";
2879 (*cache_p)->next = NULL;
2880 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2881 (*cache_p)->num_regs = num_regs;
2882
2883 for (i = 0; i < num_regs; i++)
2884 {
2885 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2886 (*cache_p)->reg_list[i].value = calloc(4, 1);
2887 (*cache_p)->reg_list[i].dirty = 0;
2888 (*cache_p)->reg_list[i].valid = 0;
2889 (*cache_p)->reg_list[i].size = 32;
2890 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2891 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2892 arch_info[i] = xscale_reg_arch_info[i];
2893 arch_info[i].target = target;
2894 }
2895
2896 xscale->reg_cache = (*cache_p);
2897 }
2898
2899 static int xscale_init_target(struct command_context *cmd_ctx,
2900 struct target *target)
2901 {
2902 xscale_build_reg_cache(target);
2903 return ERROR_OK;
2904 }
2905
/* One-time initialization of the XScale-specific target state:
 * bookkeeping (vector tables, breakpoint/watchpoint resource counters,
 * trace state), optional TAP IR-length fixup derived from the variant
 * name, and wiring of the generic ARMv4/5 core and MMU callbacks to
 * their XScale implementations. */
static int xscale_init_arch_info(struct target *target,
		struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	struct arm *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specific data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* a variant param ... */
	if (variant) {
		int ir_length = 0;

		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else if (strcmp(variant, "pxa3xx") == 0)
			ir_length = 11;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		/* correct a config-file IR length that contradicts the variant */
		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* PXA3xx shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	/* reset vector (slot 0) branches into the debug handler */
	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* placeholder branches for the remaining seven vector slots */
	for (i = 1; i <= 7; i++)
	{
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two instruction breakpoint comparators (IBCR0/IBCR1) ... */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	/* ... and two data breakpoint comparators (DBR0/DBR1) */
	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
			target_name(target));

	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	/* trace support starts out idle, with no data and no image loaded */
	xscale->trace.capture_status = TRACE_IDLE;
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.buffer_enabled = 0;
	xscale->trace.buffer_fill = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	arm_init_arch_info(target, armv4_5);

	/* hooks used by the generic ARMv4/5 MMU/cache code */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
3021
3022 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3023 {
3024 struct xscale_common *xscale;
3025
3026 if (sizeof xscale_debug_handler - 1 > 0x800) {
3027 LOG_ERROR("debug_handler.bin: larger than 2kb");
3028 return ERROR_FAIL;
3029 }
3030
3031 xscale = calloc(1, sizeof(*xscale));
3032 if (!xscale)
3033 return ERROR_FAIL;
3034
3035 return xscale_init_arch_info(target, xscale, target->tap,
3036 target->variant);
3037 }
3038
3039 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3040 {
3041 struct target *target = NULL;
3042 struct xscale_common *xscale;
3043 int retval;
3044 uint32_t handler_address;
3045
3046 if (CMD_ARGC < 2)
3047 {
3048 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3049 return ERROR_OK;
3050 }
3051
3052 if ((target = get_target(CMD_ARGV[0])) == NULL)
3053 {
3054 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3055 return ERROR_FAIL;
3056 }
3057
3058 xscale = target_to_xscale(target);
3059 retval = xscale_verify_pointer(CMD_CTX, xscale);
3060 if (retval != ERROR_OK)
3061 return retval;
3062
3063 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3064
3065 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3066 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3067 {
3068 xscale->handler_address = handler_address;
3069 }
3070 else
3071 {
3072 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3073 return ERROR_FAIL;
3074 }
3075
3076 return ERROR_OK;
3077 }
3078
3079 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3080 {
3081 struct target *target = NULL;
3082 struct xscale_common *xscale;
3083 int retval;
3084 uint32_t cache_clean_address;
3085
3086 if (CMD_ARGC < 2)
3087 {
3088 return ERROR_COMMAND_SYNTAX_ERROR;
3089 }
3090
3091 target = get_target(CMD_ARGV[0]);
3092 if (target == NULL)
3093 {
3094 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3095 return ERROR_FAIL;
3096 }
3097 xscale = target_to_xscale(target);
3098 retval = xscale_verify_pointer(CMD_CTX, xscale);
3099 if (retval != ERROR_OK)
3100 return retval;
3101
3102 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3103
3104 if (cache_clean_address & 0xffff)
3105 {
3106 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3107 }
3108 else
3109 {
3110 xscale->cache_clean_address = cache_clean_address;
3111 }
3112
3113 return ERROR_OK;
3114 }
3115
3116 COMMAND_HANDLER(xscale_handle_cache_info_command)
3117 {
3118 struct target *target = get_current_target(CMD_CTX);
3119 struct xscale_common *xscale = target_to_xscale(target);
3120 int retval;
3121
3122 retval = xscale_verify_pointer(CMD_CTX, xscale);
3123 if (retval != ERROR_OK)
3124 return retval;
3125
3126 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3127 }
3128
3129 static int xscale_virt2phys(struct target *target,
3130 uint32_t virtual, uint32_t *physical)
3131 {
3132 struct xscale_common *xscale = target_to_xscale(target);
3133 int type;
3134 uint32_t cb;
3135 int domain;
3136 uint32_t ap;
3137
3138 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3139 LOG_ERROR(xscale_not);
3140 return ERROR_TARGET_INVALID;
3141 }
3142
3143 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3144 if (type == -1)
3145 {
3146 return ret;
3147 }
3148 *physical = ret;
3149 return ERROR_OK;
3150 }
3151
3152 static int xscale_mmu(struct target *target, int *enabled)
3153 {
3154 struct xscale_common *xscale = target_to_xscale(target);
3155
3156 if (target->state != TARGET_HALTED)
3157 {
3158 LOG_ERROR("Target not halted");
3159 return ERROR_TARGET_INVALID;
3160 }
3161 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3162 return ERROR_OK;
3163 }
3164
3165 COMMAND_HANDLER(xscale_handle_mmu_command)
3166 {
3167 struct target *target = get_current_target(CMD_CTX);
3168 struct xscale_common *xscale = target_to_xscale(target);
3169 int retval;
3170
3171 retval = xscale_verify_pointer(CMD_CTX, xscale);
3172 if (retval != ERROR_OK)
3173 return retval;
3174
3175 if (target->state != TARGET_HALTED)
3176 {
3177 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3178 return ERROR_OK;
3179 }
3180
3181 if (CMD_ARGC >= 1)
3182 {
3183 bool enable;
3184 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3185 if (enable)
3186 xscale_enable_mmu_caches(target, 1, 0, 0);
3187 else
3188 xscale_disable_mmu_caches(target, 1, 0, 0);
3189 xscale->armv4_5_mmu.mmu_enabled = enable;
3190 }
3191
3192 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3193
3194 return ERROR_OK;
3195 }
3196
3197 COMMAND_HANDLER(xscale_handle_idcache_command)
3198 {
3199 struct target *target = get_current_target(CMD_CTX);
3200 struct xscale_common *xscale = target_to_xscale(target);
3201
3202 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3203 if (retval != ERROR_OK)
3204 return retval;
3205
3206 if (target->state != TARGET_HALTED)
3207 {
3208 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3209 return ERROR_OK;
3210 }
3211
3212 bool icache = false;
3213 if (strcmp(CMD_NAME, "icache") == 0)
3214 icache = true;
3215 if (CMD_ARGC >= 1)
3216 {
3217 bool enable;
3218 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3219 if (icache) {
3220 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3221 if (enable)
3222 xscale_enable_mmu_caches(target, 0, 0, 1);
3223 else
3224 xscale_disable_mmu_caches(target, 0, 0, 1);
3225 } else {
3226 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3227 if (enable)
3228 xscale_enable_mmu_caches(target, 0, 1, 0);
3229 else
3230 xscale_disable_mmu_caches(target, 0, 1, 0);
3231 }
3232 }
3233
3234 bool enabled = icache ?
3235 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3236 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3237 const char *msg = enabled ? "enabled" : "disabled";
3238 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3239
3240 return ERROR_OK;
3241 }
3242
3243 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3244 {
3245 struct target *target = get_current_target(CMD_CTX);
3246 struct xscale_common *xscale = target_to_xscale(target);
3247 int retval;
3248
3249 retval = xscale_verify_pointer(CMD_CTX, xscale);
3250 if (retval != ERROR_OK)
3251 return retval;
3252
3253 if (CMD_ARGC < 1)
3254 {
3255 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3256 }
3257 else
3258 {
3259 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3260 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3261 xscale_write_dcsr(target, -1, -1);
3262 }
3263
3264 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3265
3266 return ERROR_OK;
3267 }
3268
3269
3270 COMMAND_HANDLER(xscale_handle_vector_table_command)
3271 {
3272 struct target *target = get_current_target(CMD_CTX);
3273 struct xscale_common *xscale = target_to_xscale(target);
3274 int err = 0;
3275 int retval;
3276
3277 retval = xscale_verify_pointer(CMD_CTX, xscale);
3278 if (retval != ERROR_OK)
3279 return retval;
3280
3281 if (CMD_ARGC == 0) /* print current settings */
3282 {
3283 int idx;
3284
3285 command_print(CMD_CTX, "active user-set static vectors:");
3286 for (idx = 1; idx < 8; idx++)
3287 if (xscale->static_low_vectors_set & (1 << idx))
3288 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3289 for (idx = 1; idx < 8; idx++)
3290 if (xscale->static_high_vectors_set & (1 << idx))
3291 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3292 return ERROR_OK;
3293 }
3294
3295 if (CMD_ARGC != 3)
3296 err = 1;
3297 else
3298 {
3299 int idx;
3300 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3301 uint32_t vec;
3302 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3303
3304 if (idx < 1 || idx >= 8)
3305 err = 1;
3306
3307 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3308 {
3309 xscale->static_low_vectors_set |= (1<<idx);
3310 xscale->static_low_vectors[idx] = vec;
3311 }
3312 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3313 {
3314 xscale->static_high_vectors_set |= (1<<idx);
3315 xscale->static_high_vectors[idx] = vec;
3316 }
3317 else
3318 err = 1;
3319 }
3320
3321 if (err)
3322 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3323
3324 return ERROR_OK;
3325 }
3326
3327
/* "xscale trace_buffer ['enable'|'disable' ['fill' n|'wrap']]":
 * display trace buffer status, enable or disable tracing, and
 * optionally select fill-once vs. wrap mode.  Finally mirrors the
 * chosen trace mode into DCSR bits [1:0].
 */
COMMAND_HANDLER(xscale_handle_trace_buffer_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t dcsr_value;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	/* trace configuration touches DCSR; only valid while halted */
	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
	{
		struct xscale_trace_data *td, *next_td;
		xscale->trace.buffer_enabled = 1;

		/* free old trace data: enabling starts a fresh capture, so
		 * any previously collected blocks (and their entry arrays)
		 * are discarded */
		td = xscale->trace.data;
		while (td)
		{
			next_td = td->next;

			if (td->entries)
				free(td->entries);
			free(td);
			td = next_td;
		}
		xscale->trace.data = NULL;
	}
	else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
	{
		xscale->trace.buffer_enabled = 0;
	}

	/* optional second argument selects the capture mode:
	 * "fill [n]" stops after n buffer fills (default 1),
	 * "wrap" overwrites continuously (buffer_fill == -1) */
	if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
	{
		uint32_t fill = 1;
		if (CMD_ARGC >= 3)
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
		xscale->trace.buffer_fill = fill;
	}
	else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
	{
		xscale->trace.buffer_fill = -1;
	}

	if (xscale->trace.buffer_enabled)
	{
		/* if we enable the trace buffer in fill-once
		 * mode we know the address of the first instruction */
		/* NOTE(review): pc_ok/current_pc are set here regardless of
		 * fill vs. wrap mode, although the comment above only argues
		 * the fill-once case — confirm this is intended for wrap mode */
		xscale->trace.pc_ok = 1;
		xscale->trace.current_pc =
			buf_get_u32(armv4_5->pc->value, 0, 32);
	}
	else
	{
		/* otherwise the address is unknown, and we have no known good PC */
		xscale->trace.pc_ok = 0;
	}

	command_print(CMD_CTX, "trace buffer %s (%s)",
		(xscale->trace.buffer_enabled) ? "enabled" : "disabled",
		(xscale->trace.buffer_fill > 0) ? "fill" : "wrap");

	/* program DCSR[1:0]: 0b10 selects fill-once trace mode,
	 * 0b00 selects wrap mode */
	dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
	if (xscale->trace.buffer_fill >= 0)
		xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
	else
		xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);

	return ERROR_OK;
}
3407
3408 COMMAND_HANDLER(xscale_handle_trace_image_command)
3409 {
3410 struct target *target = get_current_target(CMD_CTX);
3411 struct xscale_common *xscale = target_to_xscale(target);
3412 int retval;
3413
3414 if (CMD_ARGC < 1)
3415 {
3416 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3417 return ERROR_OK;
3418 }
3419
3420 retval = xscale_verify_pointer(CMD_CTX, xscale);
3421 if (retval != ERROR_OK)
3422 return retval;
3423
3424 if (xscale->trace.image)
3425 {
3426 image_close(xscale->trace.image);
3427 free(xscale->trace.image);
3428 command_print(CMD_CTX, "previously loaded image found and closed");
3429 }
3430
3431 xscale->trace.image = malloc(sizeof(struct image));
3432 xscale->trace.image->base_address_set = 0;
3433 xscale->trace.image->start_address_set = 0;
3434
3435 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3436 if (CMD_ARGC >= 2)
3437 {
3438 xscale->trace.image->base_address_set = 1;
3439 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3440 }
3441 else
3442 {
3443 xscale->trace.image->base_address_set = 0;
3444 }
3445
3446 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3447 {
3448 free(xscale->trace.image);
3449 xscale->trace.image = NULL;
3450 return ERROR_OK;
3451 }
3452
3453 return ERROR_OK;
3454 }
3455
3456 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3457 {
3458 struct target *target = get_current_target(CMD_CTX);
3459 struct xscale_common *xscale = target_to_xscale(target);
3460 struct xscale_trace_data *trace_data;
3461 struct fileio file;
3462 int retval;
3463
3464 retval = xscale_verify_pointer(CMD_CTX, xscale);
3465 if (retval != ERROR_OK)
3466 return retval;
3467
3468 if (target->state != TARGET_HALTED)
3469 {
3470 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3471 return ERROR_OK;
3472 }
3473
3474 if (CMD_ARGC < 1)
3475 {
3476 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3477 return ERROR_OK;
3478 }
3479
3480 trace_data = xscale->trace.data;
3481
3482 if (!trace_data)
3483 {
3484 command_print(CMD_CTX, "no trace data collected");
3485 return ERROR_OK;
3486 }
3487
3488 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3489 {
3490 return ERROR_OK;
3491 }
3492
3493 while (trace_data)
3494 {
3495 int i;
3496
3497 fileio_write_u32(&file, trace_data->chkpt0);
3498 fileio_write_u32(&file, trace_data->chkpt1);
3499 fileio_write_u32(&file, trace_data->last_instruction);
3500 fileio_write_u32(&file, trace_data->depth);
3501
3502 for (i = 0; i < trace_data->depth; i++)
3503 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3504
3505 trace_data = trace_data->next;
3506 }
3507
3508 fileio_close(&file);
3509
3510 return ERROR_OK;
3511 }
3512
3513 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3514 {
3515 struct target *target = get_current_target(CMD_CTX);
3516 struct xscale_common *xscale = target_to_xscale(target);
3517 int retval;
3518
3519 retval = xscale_verify_pointer(CMD_CTX, xscale);
3520 if (retval != ERROR_OK)
3521 return retval;
3522
3523 xscale_analyze_trace(target, CMD_CTX);
3524
3525 return ERROR_OK;
3526 }
3527
3528 COMMAND_HANDLER(xscale_handle_cp15)
3529 {
3530 struct target *target = get_current_target(CMD_CTX);
3531 struct xscale_common *xscale = target_to_xscale(target);
3532 int retval;
3533
3534 retval = xscale_verify_pointer(CMD_CTX, xscale);
3535 if (retval != ERROR_OK)
3536 return retval;
3537
3538 if (target->state != TARGET_HALTED)
3539 {
3540 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3541 return ERROR_OK;
3542 }
3543 uint32_t reg_no = 0;
3544 struct reg *reg = NULL;
3545 if (CMD_ARGC > 0)
3546 {
3547 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3548 /*translate from xscale cp15 register no to openocd register*/
3549 switch (reg_no)
3550 {
3551 case 0:
3552 reg_no = XSCALE_MAINID;
3553 break;
3554 case 1:
3555 reg_no = XSCALE_CTRL;
3556 break;
3557 case 2:
3558 reg_no = XSCALE_TTB;
3559 break;
3560 case 3:
3561 reg_no = XSCALE_DAC;
3562 break;
3563 case 5:
3564 reg_no = XSCALE_FSR;
3565 break;
3566 case 6:
3567 reg_no = XSCALE_FAR;
3568 break;
3569 case 13:
3570 reg_no = XSCALE_PID;
3571 break;
3572 case 15:
3573 reg_no = XSCALE_CPACCESS;
3574 break;
3575 default:
3576 command_print(CMD_CTX, "invalid register number");
3577 return ERROR_INVALID_ARGUMENTS;
3578 }
3579 reg = &xscale->reg_cache->reg_list[reg_no];
3580
3581 }
3582 if (CMD_ARGC == 1)
3583 {
3584 uint32_t value;
3585
3586 /* read cp15 control register */
3587 xscale_get_reg(reg);
3588 value = buf_get_u32(reg->value, 0, 32);
3589 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3590 }
3591 else if (CMD_ARGC == 2)
3592 {
3593 uint32_t value;
3594 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3595
3596 /* send CP write request (command 0x41) */
3597 xscale_send_u32(target, 0x41);
3598
3599 /* send CP register number */
3600 xscale_send_u32(target, reg_no);
3601
3602 /* send CP register value */
3603 xscale_send_u32(target, value);
3604
3605 /* execute cpwait to ensure outstanding operations complete */
3606 xscale_send_u32(target, 0x53);
3607 }
3608 else
3609 {
3610 command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
3611 }
3612
3613 return ERROR_OK;
3614 }
3615
/* "xscale" sub-commands that require an existing target
 * (COMMAND_EXEC mode); chained below "xscale" via
 * xscale_any_command_handlers. */
static const struct command_registration xscale_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = xscale_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about CPU caches",
	},
	{
		.name = "mmu",
		.handler = xscale_handle_mmu_command,
		.mode = COMMAND_EXEC,
		.help = "enable or disable the MMU",
		.usage = "['enable'|'disable']",
	},
	/* icache and dcache share one handler, which dispatches
	 * on CMD_NAME */
	{
		.name = "icache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display ICache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "dcache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display DCache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "vector_catch",
		.handler = xscale_handle_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "set or display 8-bit mask of vectors "
			"that should trigger debug entry",
		.usage = "[mask]",
	},
	{
		.name = "vector_table",
		.handler = xscale_handle_vector_table_command,
		.mode = COMMAND_EXEC,
		.help = "set vector table entry in mini-ICache, "
			"or display current tables",
		.usage = "[('high'|'low') index code]",
	},
	{
		.name = "trace_buffer",
		.handler = xscale_handle_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "display trace buffer status, enable or disable "
			"tracing, and optionally reconfigure trace mode",
		.usage = "['enable'|'disable' ['fill' number|'wrap']]",
	},
	{
		.name = "dump_trace",
		.handler = xscale_handle_dump_trace_command,
		.mode = COMMAND_EXEC,
		.help = "dump content of trace buffer to file",
		.usage = "filename",
	},
	{
		.name = "analyze_trace",
		.handler = xscale_handle_analyze_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "analyze content of trace buffer",
		.usage = "",
	},
	{
		.name = "trace_image",
		.handler = xscale_handle_trace_image_command,
		.mode = COMMAND_EXEC,
		.help = "load image from file to address (default 0)",
		.usage = "filename [offset [filetype]]",
	},
	{
		.name = "cp15",
		.handler = xscale_handle_cp15,
		.mode = COMMAND_EXEC,
		.help = "Read or write coprocessor 15 register.",
		.usage = "register [value]",
	},
	COMMAND_REGISTRATION_DONE
};
/* "xscale" sub-commands usable in any mode (COMMAND_ANY), plus the
 * chained EXEC-mode commands above. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "target address",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "address",
	},
	{
		/* pull in the EXEC-only sub-commands */
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* top-level registration: generic ARM commands plus the "xscale"
 * command group defined above. */
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3732
/* target_type vtable for Intel XScale cores: wires the xscale_*
 * implementations (and shared ARM helpers) into the OpenOCD target
 * framework. */
struct target_type xscale_target =
{
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	/* no target-request data channel implemented */
	.target_request_data = NULL,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	/* soft reset halt not implemented */
	.soft_reset_halt = NULL,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* virtual and physical memory access use separate entry points */
	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,
	.bulk_write_memory = xscale_bulk_write_memory,

	/* checksum/blank-check run on-target via the generic ARM helpers */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will allow linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)