arm: add error propagation for enable/disable mmu caches
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
41
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
74
75
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
78 *
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
83 */
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
86
/* Names for the XScale-specific register cache entries.  Indices follow
 * the XSCALE_* register enum (this array is indexed with values such as
 * XSCALE_DCSR elsewhere in this file). */
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID", /* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0", /* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX", /* 20 */
	"XSCALE_TXRXCTRL",
};
112
/* Per-register arch info, entry-for-entry parallel to xscale_reg_list.
 * NOTE(review): the first member appears to identify the register to the
 * debug handler, with -1 marking registers reached directly over JTAG
 * (see trailing comments) — confirm against struct xscale_reg in
 * xscale.h. */
static const struct xscale_reg xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
	{-1, NULL}, /* TX accessed via JTAG */
	{-1, NULL}, /* RX accessed via JTAG */
	{-1, NULL}, /* TXRXCTRL implicit access via JTAG */
};
138
139 /* convenience wrapper to access XScale specific registers */
/* Convenience wrapper: store a host 32-bit value into an XScale register
 * through xscale_set_reg(); returns its result. */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t packed[4];

	buf_set_u32(packed, 0, 32, value);
	return xscale_set_reg(reg, packed);
}
148
149 static const char xscale_not[] = "target is not an XScale";
150
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
153 {
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
157 }
158 return ERROR_OK;
159 }
160
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
162 {
163 if (tap == NULL)
164 return ERROR_FAIL;
165
166 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
167 {
168 struct scan_field field;
169 uint8_t scratch[4];
170
171 memset(&field, 0, sizeof field);
172 field.num_bits = tap->ir_length;
173 field.out_value = scratch;
174 buf_set_u32(scratch, 0, field.num_bits, new_instr);
175
176 jtag_add_ir_scan(tap, &field, end_state);
177 }
178
179 return ERROR_OK;
180 }
181
/* Read the DCSR (Debug Control and Status Register) over JTAG into the
 * register cache, then immediately write the same value back.
 *
 * The cached hold_rst and external_debug_break flags are driven in the
 * 3-bit control field on both passes, so a read also refreshes those
 * sticky bits on the target.  Returns ERROR_OK or a JTAG error code.
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant,
			TAP_DRPAUSE);

	/* control bits: bit 1 = hold_rst, bit 2 = external debug break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
243
244
245 static void xscale_getbuf(jtag_callback_data_t arg)
246 {
247 uint8_t *in = (uint8_t *)arg;
248 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
249 }
250
251 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
252 {
253 if (num_words == 0)
254 return ERROR_INVALID_ARGUMENTS;
255
256 struct xscale_common *xscale = target_to_xscale(target);
257 int retval = ERROR_OK;
258 tap_state_t path[3];
259 struct scan_field fields[3];
260 uint8_t *field0 = malloc(num_words * 1);
261 uint8_t field0_check_value = 0x2;
262 uint8_t field0_check_mask = 0x6;
263 uint32_t *field1 = malloc(num_words * 4);
264 uint8_t field2_check_value = 0x0;
265 uint8_t field2_check_mask = 0x1;
266 int words_done = 0;
267 int words_scheduled = 0;
268 int i;
269
270 path[0] = TAP_DRSELECT;
271 path[1] = TAP_DRCAPTURE;
272 path[2] = TAP_DRSHIFT;
273
274 memset(&fields, 0, sizeof fields);
275
276 fields[0].num_bits = 3;
277 fields[0].check_value = &field0_check_value;
278 fields[0].check_mask = &field0_check_mask;
279
280 fields[1].num_bits = 32;
281
282 fields[2].num_bits = 1;
283 fields[2].check_value = &field2_check_value;
284 fields[2].check_mask = &field2_check_mask;
285
286 xscale_jtag_set_instr(target->tap,
287 XSCALE_DBGTX << xscale->xscale_variant,
288 TAP_IDLE);
289 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
290
291 /* repeat until all words have been collected */
292 int attempts = 0;
293 while (words_done < num_words)
294 {
295 /* schedule reads */
296 words_scheduled = 0;
297 for (i = words_done; i < num_words; i++)
298 {
299 fields[0].in_value = &field0[i];
300
301 jtag_add_pathmove(3, path);
302
303 fields[1].in_value = (uint8_t *)(field1 + i);
304
305 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
306
307 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
308
309 words_scheduled++;
310 }
311
312 if ((retval = jtag_execute_queue()) != ERROR_OK)
313 {
314 LOG_ERROR("JTAG error while receiving data from debug handler");
315 break;
316 }
317
318 /* examine results */
319 for (i = words_done; i < num_words; i++)
320 {
321 if (!(field0[0] & 1))
322 {
323 /* move backwards if necessary */
324 int j;
325 for (j = i; j < num_words - 1; j++)
326 {
327 field0[j] = field0[j + 1];
328 field1[j] = field1[j + 1];
329 }
330 words_scheduled--;
331 }
332 }
333 if (words_scheduled == 0)
334 {
335 if (attempts++==1000)
336 {
337 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
338 retval = ERROR_TARGET_TIMEOUT;
339 break;
340 }
341 }
342
343 words_done += words_scheduled;
344 }
345
346 for (i = 0; i < num_words; i++)
347 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
348
349 free(field1);
350
351 return retval;
352 }
353
/* Read the debug handler's TX register into the XSCALE_TX cache entry.
 *
 * When consume is nonzero the TAP goes straight from Capture-DR to
 * Shift-DR, which clears TX_READY; otherwise a detour through Pause-DR
 * leaves the handshake intact so the word stays pending.  Polls for up
 * to one second, keeping the connection alive while waiting.
 *
 * Returns ERROR_OK on success, ERROR_TARGET_RESOURCE_NOT_AVAILABLE when
 * no data was ready, or ERROR_TARGET_TIMEOUT.
 * NOTE(review): a JTAG queue failure is also reported as
 * ERROR_TARGET_TIMEOUT rather than retval — confirm this is intended.
 */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGTX << xscale->xscale_variant,
			TAP_IDLE);

	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* bit 0 of the status field is TX valid; leave the loop unless
		 * we must keep polling for a word to consume */
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
448
/* Write the XSCALE_RX cache value to the debug handler's RX register.
 *
 * Polls (for up to one second) until the handler has consumed the
 * previous word — the ready bit in the 3-bit status field goes low —
 * then scans the word once more with the rx_valid flag set to hand it
 * over.  Returns ERROR_OK, a JTAG error code, or ERROR_TARGET_TIMEOUT.
 */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant,
			TAP_IDLE);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
530
/* Send count elements of size bytes each to the debug handler via the
 * DBGRX data register.
 *
 * Elements are streamed with out-only DR scans (jtag_add_dr_out) and no
 * per-word handshake polling, so this is the fast path for bulk
 * downloads; the element byte order follows the target's endianness.
 * size must be 4, 2 or 1, otherwise ERROR_INVALID_ARGUMENTS is
 * returned.  Returns ERROR_OK or a JTAG error code.
 */
static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t t[3];
	int bits[3];
	int retval;
	int done_count = 0;

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant,
			TAP_IDLE);

	/* 3-bit control field, 32-bit payload, 1 trailing bit
	 * (t[2] = 1 — presumably the rx_valid flag, as in
	 * xscale_write_rx — TODO confirm) */
	bits[0]=3;
	t[0]=0;
	bits[1]=32;
	t[2]=1;
	bits[2]=1;
	int endianness = target->endianness;
	while (done_count++ < count)
	{
		switch (size)
		{
		case 4:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u32(buffer);
			} else
			{
				t[1]=be_to_h_u32(buffer);
			}
			break;
		case 2:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u16(buffer);
			} else
			{
				t[1]=be_to_h_u16(buffer);
			}
			break;
		case 1:
			t[1]=buffer[0];
			break;
		default:
			LOG_ERROR("BUG: size neither 4, 2 nor 1");
			return ERROR_INVALID_ARGUMENTS;
		}
		jtag_add_dr_out(target->tap,
				3,
				bits,
				t,
				TAP_IDLE);
		buffer += size;
	}

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
595
596 static int xscale_send_u32(struct target *target, uint32_t value)
597 {
598 struct xscale_common *xscale = target_to_xscale(target);
599
600 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
601 return xscale_write_rx(target);
602 }
603
/* Write the cached DCSR value to the target via the SELDCSR scan chain.
 *
 * hold_rst / ext_dbg_brk update the corresponding sticky flags in the
 * xscale state unless passed as -1 (leave unchanged); both are driven
 * in the 3-bit control field of the scan.  On success the DCSR cache
 * entry is marked clean and valid.  Returns ERROR_OK or a JTAG error.
 */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant,
			TAP_IDLE);

	/* control bits: bit 1 = hold_rst, bit 2 = external debug break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
660
/* Parity of a 32-bit word: returns 1 when v has an odd number of set
 * bits, 0 when even.  Computed by XOR-folding the word down to one bit. */
static unsigned int parity (unsigned int v)
{
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v ^= v >> 2;
	v ^= v >> 1;
	return v & 1;
}
672
/* Load one 8-word cache line into the mini instruction cache via the
 * LDIC JTAG instruction.
 *
 * va selects the target cache line (the low 5 bits are dropped, so it
 * should be line-aligned); buffer supplies the eight instruction words,
 * each scanned together with its parity bit as the LDIC protocol
 * requires.  Returns the result of executing the queued scans.
 */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	xscale_jtag_set_instr(target->tap,
			XSCALE_LDIC << xscale->xscale_variant,
			TAP_IDLE);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* memcpy avoids a strict-aliasing read of the packed bytes */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	}

	return jtag_execute_queue();
}
727
/* Queue an LDIC "invalidate IC line" command for the cache line holding
 * va (low 5 bits dropped).  The scan is only queued here; it takes
 * effect with the caller's next jtag_execute_queue().  Always returns
 * ERROR_OK. */
static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	struct scan_field fields[2];

	xscale_jtag_set_instr(target->tap,
			XSCALE_LDIC << xscale->xscale_variant,
			TAP_IDLE);

	/* CMD for invalidate IC line b000, bits [6:4] b000 */
	buf_set_u32(&cmd, 0, 6, 0x0);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	return ERROR_OK;
}
757
758 static int xscale_update_vectors(struct target *target)
759 {
760 struct xscale_common *xscale = target_to_xscale(target);
761 int i;
762 int retval;
763
764 uint32_t low_reset_branch, high_reset_branch;
765
766 for (i = 1; i < 8; i++)
767 {
768 /* if there's a static vector specified for this exception, override */
769 if (xscale->static_high_vectors_set & (1 << i))
770 {
771 xscale->high_vectors[i] = xscale->static_high_vectors[i];
772 }
773 else
774 {
775 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
776 if (retval == ERROR_TARGET_TIMEOUT)
777 return retval;
778 if (retval != ERROR_OK)
779 {
780 /* Some of these reads will fail as part of normal execution */
781 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
782 }
783 }
784 }
785
786 for (i = 1; i < 8; i++)
787 {
788 if (xscale->static_low_vectors_set & (1 << i))
789 {
790 xscale->low_vectors[i] = xscale->static_low_vectors[i];
791 }
792 else
793 {
794 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
795 if (retval == ERROR_TARGET_TIMEOUT)
796 return retval;
797 if (retval != ERROR_OK)
798 {
799 /* Some of these reads will fail as part of normal execution */
800 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
801 }
802 }
803 }
804
805 /* calculate branches to debug handler */
806 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
807 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
808
809 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
810 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
811
812 /* invalidate and load exception vectors in mini i-cache */
813 xscale_invalidate_ic_line(target, 0x0);
814 xscale_invalidate_ic_line(target, 0xffff0000);
815
816 xscale_load_ic(target, 0x0, xscale->low_vectors);
817 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
818
819 return ERROR_OK;
820 }
821
/* Display architecture-specific state on top of the common ARM dump:
 * MMU and I/D-cache enable status plus the extra XScale debug reason
 * (processor reset / trace buffer full) recorded at debug entry. */
static int xscale_arch_state(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;

	static const char *state[] =
	{
		"disabled", "enabled"
	};

	static const char *arch_dbg_reason[] =
	{
		"", "\n(processor reset)", "\n(trace buffer full)"
	};

	/* sanity check that the embedded ARM state was initialized */
	if (armv4_5->common_magic != ARM_COMMON_MAGIC)
	{
		LOG_ERROR("BUG: called for a non-ARMv4/5 target");
		return ERROR_INVALID_ARGUMENTS;
	}

	arm_arch_state(target);
	LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
			state[xscale->armv4_5_mmu.mmu_enabled],
			state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
			state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
			arch_dbg_reason[xscale->arch_debug_reason]);

	return ERROR_OK;
}
852
/* Periodic poll while the target is (debug-)running: a successful
 * non-consuming TX read means the debug handler became active, so mark
 * the target halted and process the debug entry.  On an unexpected TX
 * error we also claim HALTED ("lie") so GDB does not get stuck and a
 * reset can be performed.  Fires HALTED or DEBUG_HALTED callbacks
 * depending on the previous state. */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
	{
		enum target_state previous_state = target->state;
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
		{

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		}
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		{
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be perfomed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
892
/* Process entry into debug state.
 *
 * Fetches r0, pc, r1-r7 and cpsr (then the banked r8-r14 and spsr where
 * applicable) from the debug handler, fills the register cache, decodes
 * the DCSR method-of-entry bits into target->debug_reason, applies the
 * PC fixup for the entry cause, refreshes MMU/cache status from CP15,
 * and handles trace-buffer bookkeeping (possibly resuming immediately
 * while trace collection continues).
 */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t pc;
	uint32_t buffer[10];
	int i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->pc->value, 0, 32, buffer[1]);
	armv4_5->pc->dirty = 1;
	armv4_5->pc->valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(armv4_5, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* an invalid mode in cpsr means we lost sync with the handler */
	if (!is_arm_mode(armv4_5->core_mode))
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
		arm_mode_name(armv4_5->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (armv4_5->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
		armv4_5->spsr->dirty = false;
		armv4_5->spsr->valid = true;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++)
	{
		struct reg *r = arm_reg_current(armv4_5, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* examine debug reason: method-of-entry is DCSR bits [4:2] */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	switch (moe)
	{
		case 0x0: /* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1: /* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2: /* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3: /* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4: /* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5: /* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6: /* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7: /* Reserved (may flag Hot-Debug support) */
		default:
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(armv4_5->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.buffer_enabled)
	{
		xscale_read_trace(target);
		xscale->trace.buffer_fill--;

		/* resume if we're still collecting trace data */
		if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
			&& (xscale->trace.buffer_fill > 0))
		{
			xscale_resume(target, 1, 0x0, 1, 0);
		}
		else
		{
			xscale->trace.buffer_enabled = 0;
		}
	}

	return ERROR_OK;
}
1062
1063 static int xscale_halt(struct target *target)
1064 {
1065 struct xscale_common *xscale = target_to_xscale(target);
1066
1067 LOG_DEBUG("target->state: %s",
1068 target_state_name(target));
1069
1070 if (target->state == TARGET_HALTED)
1071 {
1072 LOG_DEBUG("target was already halted");
1073 return ERROR_OK;
1074 }
1075 else if (target->state == TARGET_UNKNOWN)
1076 {
1077 /* this must not happen for a xscale target */
1078 LOG_ERROR("target was in unknown state when halt was requested");
1079 return ERROR_TARGET_INVALID;
1080 }
1081 else if (target->state == TARGET_RESET)
1082 {
1083 LOG_DEBUG("target->state == TARGET_RESET");
1084 }
1085 else
1086 {
1087 /* assert external dbg break */
1088 xscale->external_debug_break = 1;
1089 xscale_read_dcsr(target);
1090
1091 target->debug_reason = DBG_REASON_DBGRQ;
1092 }
1093
1094 return ERROR_OK;
1095 }
1096
1097 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1098 {
1099 struct xscale_common *xscale = target_to_xscale(target);
1100 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1101 int retval;
1102
1103 if (xscale->ibcr0_used)
1104 {
1105 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1106
1107 if (ibcr0_bp)
1108 {
1109 xscale_unset_breakpoint(target, ibcr0_bp);
1110 }
1111 else
1112 {
1113 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1114 exit(-1);
1115 }
1116 }
1117
1118 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1119 return retval;
1120
1121 return ERROR_OK;
1122 }
1123
1124 static int xscale_disable_single_step(struct target *target)
1125 {
1126 struct xscale_common *xscale = target_to_xscale(target);
1127 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1128 int retval;
1129
1130 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1131 return retval;
1132
1133 return ERROR_OK;
1134 }
1135
1136 static void xscale_enable_watchpoints(struct target *target)
1137 {
1138 struct watchpoint *watchpoint = target->watchpoints;
1139
1140 while (watchpoint)
1141 {
1142 if (watchpoint->set == 0)
1143 xscale_set_watchpoint(target, watchpoint);
1144 watchpoint = watchpoint->next;
1145 }
1146 }
1147
1148 static void xscale_enable_breakpoints(struct target *target)
1149 {
1150 struct breakpoint *breakpoint = target->breakpoints;
1151
1152 /* set any pending breakpoints */
1153 while (breakpoint)
1154 {
1155 if (breakpoint->set == 0)
1156 xscale_set_breakpoint(target, breakpoint);
1157 breakpoint = breakpoint->next;
1158 }
1159 }
1160
/* Resume execution of a halted XScale target.
 *
 * current = 1: resume at the current PC, otherwise resume at <address>.
 * handle_breakpoints: if a breakpoint sits at the resume PC, step over
 * it first (unset, single-step via debug handler, re-set).
 * debug_execution: resume "quietly" -- working areas are kept, and
 * TARGET_EVENT_DEBUG_RESUMED is reported instead of TARGET_EVENT_RESUMED.
 *
 * The resume itself is a conversation with the on-chip debug handler:
 * command word (0x30/0x31/0x62), then CPSR, r7..r0 and PC.
 *
 * NOTE(review): many xscale_send_u32()/xscale_restore_banked()/
 * xscale_debug_entry() return values are ignored here, unlike in
 * xscale_step_inner(); errors in this path go unnoticed.
 */
static int xscale_resume(struct target *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct breakpoint *breakpoint = target->breakpoints;
	uint32_t current_pc;
	int retval;
	int i;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution)
	{
		target_free_all_working_areas(target);
	}

	/* update vector tables */
	if ((retval = xscale_update_vectors(target)) != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->pc->value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0)
	{
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		breakpoint = breakpoint_find(target,
				buf_get_u32(armv4_5->pc->value, 0, 32));
		if (breakpoint != NULL)
		{
			uint32_t next_pc;
			int saved_trace_buffer_enabled;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
			{
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
			}

			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			retval = xscale_restore_banked(target);

			/* send resume request */
			xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));

			/* the debug handler expects r7..r0, highest first */
			for (i = 7; i >= 0; i--)
			{
				/* send register */
				xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target,
				buf_get_u32(armv4_5->pc->value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
				buf_get_u32(armv4_5->pc->value, 0, 32));

			/* disable trace data collection in xscale_debug_entry() */
			saved_trace_buffer_enabled = xscale->trace.buffer_enabled;
			xscale->trace.buffer_enabled = 0;

			/* wait for and process debug entry */
			xscale_debug_entry(target);

			/* re-enable trace buffer, if enabled previously */
			xscale->trace.buffer_enabled = saved_trace_buffer_enabled;

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	retval = xscale_restore_banked(target);

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	}
	else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* the debug handler expects r7..r0, highest first */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(armv4_5->pc->value, 0, 32));
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->pc->value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution)
	{
		/* registers are now invalid */
		register_cache_invalidate(armv4_5->core_cache);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}
1322
1323 static int xscale_step_inner(struct target *target, int current,
1324 uint32_t address, int handle_breakpoints)
1325 {
1326 struct xscale_common *xscale = target_to_xscale(target);
1327 struct arm *armv4_5 = &xscale->armv4_5_common;
1328 uint32_t next_pc;
1329 int retval;
1330 int i;
1331
1332 target->debug_reason = DBG_REASON_SINGLESTEP;
1333
1334 /* calculate PC of next instruction */
1335 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1336 {
1337 uint32_t current_opcode, current_pc;
1338 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1339
1340 target_read_u32(target, current_pc, &current_opcode);
1341 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1342 return retval;
1343 }
1344
1345 LOG_DEBUG("enable single-step");
1346 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1347 return retval;
1348
1349 /* restore banked registers */
1350 if ((retval = xscale_restore_banked(target)) != ERROR_OK)
1351 return retval;
1352
1353 /* send resume request (command 0x30 or 0x31)
1354 * clean the trace buffer if it is to be enabled (0x62) */
1355 if (xscale->trace.buffer_enabled)
1356 {
1357 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1358 return retval;
1359 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1360 return retval;
1361 }
1362 else
1363 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1364 return retval;
1365
1366 /* send CPSR */
1367 retval = xscale_send_u32(target,
1368 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1369 if (retval != ERROR_OK)
1370 return retval;
1371 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1372 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1373
1374 for (i = 7; i >= 0; i--)
1375 {
1376 /* send register */
1377 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1378 return retval;
1379 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1380 }
1381
1382 /* send PC */
1383 retval = xscale_send_u32(target,
1384 buf_get_u32(armv4_5->pc->value, 0, 32));
1385 if (retval != ERROR_OK)
1386 return retval;
1387 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1388 buf_get_u32(armv4_5->pc->value, 0, 32));
1389
1390 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1391
1392 /* registers are now invalid */
1393 register_cache_invalidate(armv4_5->core_cache);
1394
1395 /* wait for and process debug entry */
1396 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1397 return retval;
1398
1399 LOG_DEBUG("disable single-step");
1400 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1401 return retval;
1402
1403 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1404
1405 return ERROR_OK;
1406 }
1407
1408 static int xscale_step(struct target *target, int current,
1409 uint32_t address, int handle_breakpoints)
1410 {
1411 struct arm *armv4_5 = target_to_arm(target);
1412 struct breakpoint *breakpoint = NULL;
1413
1414 uint32_t current_pc;
1415 int retval;
1416
1417 if (target->state != TARGET_HALTED)
1418 {
1419 LOG_WARNING("target not halted");
1420 return ERROR_TARGET_NOT_HALTED;
1421 }
1422
1423 /* current = 1: continue on current pc, otherwise continue at <address> */
1424 if (!current)
1425 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1426
1427 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1428
1429 /* if we're at the reset vector, we have to simulate the step */
1430 if (current_pc == 0x0)
1431 {
1432 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1433 return retval;
1434 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1435
1436 target->debug_reason = DBG_REASON_SINGLESTEP;
1437 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1438
1439 return ERROR_OK;
1440 }
1441
1442 /* the front-end may request us not to handle breakpoints */
1443 if (handle_breakpoints)
1444 breakpoint = breakpoint_find(target,
1445 buf_get_u32(armv4_5->pc->value, 0, 32));
1446 if (breakpoint != NULL) {
1447 retval = xscale_unset_breakpoint(target, breakpoint);
1448 if (retval != ERROR_OK)
1449 return retval;
1450 }
1451
1452 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1453
1454 if (breakpoint)
1455 {
1456 xscale_set_breakpoint(target, breakpoint);
1457 }
1458
1459 LOG_DEBUG("target stepped");
1460
1461 return ERROR_OK;
1462
1463 }
1464
/* Assert SRST on the target while arming the debug unit so the core
 * traps into debug state on reset ("Hold reset" + Halt mode + Trap
 * Reset in the DCSR), then optionally halt.
 *
 * NOTE(review): the xscale_jtag_set_instr()/xscale_write_dcsr()/
 * jtag_execute_queue() results are ignored; a failed reset setup goes
 * unreported.
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		  target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1506
/* Release SRST and bring the core up under debugger control.
 *
 * Clears all hardware breakpoint/watchpoint bookkeeping (the units come
 * out of reset disarmed), invalidates the register cache, then loads
 * the debug handler and the low/high vector cache lines into the
 * mini-icache before letting the core run.  With reset_halt the core
 * stays trapped in the debug handler; otherwise it is resumed.
 *
 * NOTE(review): several jtag_execute_queue()/xscale_write_dcsr()/
 * xscale_debug_entry()/xscale_resume() results are ignored here.
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* both IBCR comparators and both DBR registers are free again */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	register_cache_invalidate(xscale->armv4_5_common.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, TAP_IDLE);
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt)
		{
			uint32_t cache_line[8];
			unsigned i;

			/* one 32-byte cache line per iteration */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad a partial final line (0xe1a08008: ARM "mov r8, r8" no-op) */
			for (; i < 32; i += 4)
			{
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		};

		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, TAP_IDLE);

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1640
/* Read a single core register via the debug handler.
 * Stub: logs an error and reports success anyway.
 * NOTE(review): returning ERROR_OK here masks the missing support.
 */
static int xscale_read_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1648
/* Write a single core register via the debug handler.
 * Stub: logs an error and reports success anyway.
 * NOTE(review): returning ERROR_OK here masks the missing support.
 */
static int xscale_write_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode, uint32_t value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1656
1657 static int xscale_full_context(struct target *target)
1658 {
1659 struct arm *armv4_5 = target_to_arm(target);
1660
1661 uint32_t *buffer;
1662
1663 int i, j;
1664
1665 LOG_DEBUG("-");
1666
1667 if (target->state != TARGET_HALTED)
1668 {
1669 LOG_WARNING("target not halted");
1670 return ERROR_TARGET_NOT_HALTED;
1671 }
1672
1673 buffer = malloc(4 * 8);
1674
1675 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1676 * we can't enter User mode on an XScale (unpredictable),
1677 * but User shares registers with SYS
1678 */
1679 for (i = 1; i < 7; i++)
1680 {
1681 enum arm_mode mode = armv4_5_number_to_mode(i);
1682 bool valid = true;
1683 struct reg *r;
1684
1685 if (mode == ARM_MODE_USR)
1686 continue;
1687
1688 /* check if there are invalid registers in the current mode
1689 */
1690 for (j = 0; valid && j <= 16; j++)
1691 {
1692 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1693 mode, j).valid)
1694 valid = false;
1695 }
1696 if (valid)
1697 continue;
1698
1699 /* request banked registers */
1700 xscale_send_u32(target, 0x0);
1701
1702 /* send CPSR for desired bank mode */
1703 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1704
1705 /* get banked registers: r8 to r14; and SPSR
1706 * except in USR/SYS mode
1707 */
1708 if (mode != ARM_MODE_SYS) {
1709 /* SPSR */
1710 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1711 mode, 16);
1712
1713 xscale_receive(target, buffer, 8);
1714
1715 buf_set_u32(r->value, 0, 32, buffer[7]);
1716 r->dirty = false;
1717 r->valid = true;
1718 } else {
1719 xscale_receive(target, buffer, 7);
1720 }
1721
1722 /* move data from buffer to register cache */
1723 for (j = 8; j <= 14; j++)
1724 {
1725 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1726 mode, j);
1727
1728 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1729 r->dirty = false;
1730 r->valid = true;
1731 }
1732 }
1733
1734 free(buffer);
1735
1736 return ERROR_OK;
1737 }
1738
1739 static int xscale_restore_banked(struct target *target)
1740 {
1741 struct arm *armv4_5 = target_to_arm(target);
1742
1743 int i, j;
1744
1745 if (target->state != TARGET_HALTED)
1746 {
1747 LOG_WARNING("target not halted");
1748 return ERROR_TARGET_NOT_HALTED;
1749 }
1750
1751 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1752 * and check if any banked registers need to be written. Ignore
1753 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1754 * an XScale (unpredictable), but they share all registers.
1755 */
1756 for (i = 1; i < 7; i++)
1757 {
1758 enum arm_mode mode = armv4_5_number_to_mode(i);
1759 struct reg *r;
1760
1761 if (mode == ARM_MODE_USR)
1762 continue;
1763
1764 /* check if there are dirty registers in this mode */
1765 for (j = 8; j <= 14; j++)
1766 {
1767 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1768 mode, j).dirty)
1769 goto dirty;
1770 }
1771
1772 /* if not USR/SYS, check if the SPSR needs to be written */
1773 if (mode != ARM_MODE_SYS)
1774 {
1775 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1776 mode, 16).dirty)
1777 goto dirty;
1778 }
1779
1780 /* there's nothing to flush for this mode */
1781 continue;
1782
1783 dirty:
1784 /* command 0x1: "send banked registers" */
1785 xscale_send_u32(target, 0x1);
1786
1787 /* send CPSR for desired mode */
1788 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1789
1790 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1791 * but this protocol doesn't understand that nuance.
1792 */
1793 for (j = 8; j <= 14; j++) {
1794 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1795 mode, j);
1796 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1797 r->dirty = false;
1798 }
1799
1800 /* send spsr if not in USR/SYS mode */
1801 if (mode != ARM_MODE_SYS) {
1802 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1803 mode, 16);
1804 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1805 r->dirty = false;
1806 }
1807 }
1808
1809 return ERROR_OK;
1810 }
1811
1812 static int xscale_read_memory(struct target *target, uint32_t address,
1813 uint32_t size, uint32_t count, uint8_t *buffer)
1814 {
1815 struct xscale_common *xscale = target_to_xscale(target);
1816 uint32_t *buf32;
1817 uint32_t i;
1818 int retval;
1819
1820 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1821
1822 if (target->state != TARGET_HALTED)
1823 {
1824 LOG_WARNING("target not halted");
1825 return ERROR_TARGET_NOT_HALTED;
1826 }
1827
1828 /* sanitize arguments */
1829 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1830 return ERROR_INVALID_ARGUMENTS;
1831
1832 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1833 return ERROR_TARGET_UNALIGNED_ACCESS;
1834
1835 /* send memory read request (command 0x1n, n: access size) */
1836 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1837 return retval;
1838
1839 /* send base address for read request */
1840 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1841 return retval;
1842
1843 /* send number of requested data words */
1844 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1845 return retval;
1846
1847 /* receive data from target (count times 32-bit words in host endianness) */
1848 buf32 = malloc(4 * count);
1849 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1850 return retval;
1851
1852 /* extract data from host-endian buffer into byte stream */
1853 for (i = 0; i < count; i++)
1854 {
1855 switch (size)
1856 {
1857 case 4:
1858 target_buffer_set_u32(target, buffer, buf32[i]);
1859 buffer += 4;
1860 break;
1861 case 2:
1862 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1863 buffer += 2;
1864 break;
1865 case 1:
1866 *buffer++ = buf32[i] & 0xff;
1867 break;
1868 default:
1869 LOG_ERROR("invalid read size");
1870 return ERROR_INVALID_ARGUMENTS;
1871 }
1872 }
1873
1874 free(buf32);
1875
1876 /* examine DCSR, to see if Sticky Abort (SA) got set */
1877 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1878 return retval;
1879 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1880 {
1881 /* clear SA bit */
1882 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1883 return retval;
1884
1885 return ERROR_TARGET_DATA_ABORT;
1886 }
1887
1888 return ERROR_OK;
1889 }
1890
1891 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1892 uint32_t size, uint32_t count, uint8_t *buffer)
1893 {
1894 struct xscale_common *xscale = target_to_xscale(target);
1895
1896 /* with MMU inactive, there are only physical addresses */
1897 if (!xscale->armv4_5_mmu.mmu_enabled)
1898 return xscale_read_memory(target, address, size, count, buffer);
1899
1900 /** \todo: provide a non-stub implementation of this routine. */
1901 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1902 target_name(target), __func__);
1903 return ERROR_FAIL;
1904 }
1905
1906 static int xscale_write_memory(struct target *target, uint32_t address,
1907 uint32_t size, uint32_t count, uint8_t *buffer)
1908 {
1909 struct xscale_common *xscale = target_to_xscale(target);
1910 int retval;
1911
1912 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1913
1914 if (target->state != TARGET_HALTED)
1915 {
1916 LOG_WARNING("target not halted");
1917 return ERROR_TARGET_NOT_HALTED;
1918 }
1919
1920 /* sanitize arguments */
1921 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1922 return ERROR_INVALID_ARGUMENTS;
1923
1924 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1925 return ERROR_TARGET_UNALIGNED_ACCESS;
1926
1927 /* send memory write request (command 0x2n, n: access size) */
1928 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1929 return retval;
1930
1931 /* send base address for read request */
1932 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1933 return retval;
1934
1935 /* send number of requested data words to be written*/
1936 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1937 return retval;
1938
1939 /* extract data from host-endian buffer into byte stream */
1940 #if 0
1941 for (i = 0; i < count; i++)
1942 {
1943 switch (size)
1944 {
1945 case 4:
1946 value = target_buffer_get_u32(target, buffer);
1947 xscale_send_u32(target, value);
1948 buffer += 4;
1949 break;
1950 case 2:
1951 value = target_buffer_get_u16(target, buffer);
1952 xscale_send_u32(target, value);
1953 buffer += 2;
1954 break;
1955 case 1:
1956 value = *buffer;
1957 xscale_send_u32(target, value);
1958 buffer += 1;
1959 break;
1960 default:
1961 LOG_ERROR("should never get here");
1962 exit(-1);
1963 }
1964 }
1965 #endif
1966 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1967 return retval;
1968
1969 /* examine DCSR, to see if Sticky Abort (SA) got set */
1970 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1971 return retval;
1972 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1973 {
1974 /* clear SA bit */
1975 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1976 return retval;
1977
1978 return ERROR_TARGET_DATA_ABORT;
1979 }
1980
1981 return ERROR_OK;
1982 }
1983
1984 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1985 uint32_t size, uint32_t count, uint8_t *buffer)
1986 {
1987 struct xscale_common *xscale = target_to_xscale(target);
1988
1989 /* with MMU inactive, there are only physical addresses */
1990 if (!xscale->armv4_5_mmu.mmu_enabled)
1991 return xscale_read_memory(target, address, size, count, buffer);
1992
1993 /** \todo: provide a non-stub implementation of this routine. */
1994 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1995 target_name(target), __func__);
1996 return ERROR_FAIL;
1997 }
1998
/* Bulk write: forwarded to xscale_write_memory() as <count> 32-bit words. */
static int xscale_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}
2004
2005 static int xscale_get_ttb(struct target *target, uint32_t *result)
2006 {
2007 struct xscale_common *xscale = target_to_xscale(target);
2008 uint32_t ttb;
2009 int retval;
2010
2011 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2012 if (retval != ERROR_OK)
2013 return retval;
2014 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2015
2016 *result = ttb;
2017
2018 return ERROR_OK;
2019 }
2020
2021 static int xscale_disable_mmu_caches(struct target *target, int mmu,
2022 int d_u_cache, int i_cache)
2023 {
2024 struct xscale_common *xscale = target_to_xscale(target);
2025 uint32_t cp15_control;
2026 int retval;
2027
2028 /* read cp15 control register */
2029 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2030 if (retval !=ERROR_OK)
2031 return retval;
2032 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2033
2034 if (mmu)
2035 cp15_control &= ~0x1U;
2036
2037 if (d_u_cache)
2038 {
2039 /* clean DCache */
2040 retval = xscale_send_u32(target, 0x50);
2041 if (retval !=ERROR_OK)
2042 return retval;
2043 retval = xscale_send_u32(target, xscale->cache_clean_address);
2044 if (retval !=ERROR_OK)
2045 return retval;
2046
2047 /* invalidate DCache */
2048 retval = xscale_send_u32(target, 0x51);
2049 if (retval !=ERROR_OK)
2050 return retval;
2051
2052 cp15_control &= ~0x4U;
2053 }
2054
2055 if (i_cache)
2056 {
2057 /* invalidate ICache */
2058 retval = xscale_send_u32(target, 0x52);
2059 if (retval !=ERROR_OK)
2060 return retval;
2061 cp15_control &= ~0x1000U;
2062 }
2063
2064 /* write new cp15 control register */
2065 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2066 if (retval !=ERROR_OK)
2067 return retval;
2068
2069 /* execute cpwait to ensure outstanding operations complete */
2070 retval = xscale_send_u32(target, 0x53);
2071 return retval;
2072 }
2073
2074 static int xscale_enable_mmu_caches(struct target *target, int mmu,
2075 int d_u_cache, int i_cache)
2076 {
2077 struct xscale_common *xscale = target_to_xscale(target);
2078 uint32_t cp15_control;
2079 int retval;
2080
2081 /* read cp15 control register */
2082 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2083 if (retval !=ERROR_OK)
2084 return retval;
2085 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2086
2087 if (mmu)
2088 cp15_control |= 0x1U;
2089
2090 if (d_u_cache)
2091 cp15_control |= 0x4U;
2092
2093 if (i_cache)
2094 cp15_control |= 0x1000U;
2095
2096 /* write new cp15 control register */
2097 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2098 if (retval !=ERROR_OK)
2099 return retval;
2100
2101 /* execute cpwait to ensure outstanding operations complete */
2102 retval = xscale_send_u32(target, 0x53);
2103 return retval;
2104 }
2105
2106 static int xscale_set_breakpoint(struct target *target,
2107 struct breakpoint *breakpoint)
2108 {
2109 int retval;
2110 struct xscale_common *xscale = target_to_xscale(target);
2111
2112 if (target->state != TARGET_HALTED)
2113 {
2114 LOG_WARNING("target not halted");
2115 return ERROR_TARGET_NOT_HALTED;
2116 }
2117
2118 if (breakpoint->set)
2119 {
2120 LOG_WARNING("breakpoint already set");
2121 return ERROR_OK;
2122 }
2123
2124 if (breakpoint->type == BKPT_HARD)
2125 {
2126 uint32_t value = breakpoint->address | 1;
2127 if (!xscale->ibcr0_used)
2128 {
2129 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2130 xscale->ibcr0_used = 1;
2131 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2132 }
2133 else if (!xscale->ibcr1_used)
2134 {
2135 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2136 xscale->ibcr1_used = 1;
2137 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2138 }
2139 else
2140 {
2141 LOG_ERROR("BUG: no hardware comparator available");
2142 return ERROR_OK;
2143 }
2144 }
2145 else if (breakpoint->type == BKPT_SOFT)
2146 {
2147 if (breakpoint->length == 4)
2148 {
2149 /* keep the original instruction in target endianness */
2150 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2151 {
2152 return retval;
2153 }
2154 /* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2155 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2156 {
2157 return retval;
2158 }
2159 }
2160 else
2161 {
2162 /* keep the original instruction in target endianness */
2163 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2164 {
2165 return retval;
2166 }
2167 /* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2168 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2169 {
2170 return retval;
2171 }
2172 }
2173 breakpoint->set = 1;
2174
2175 xscale_send_u32(target, 0x50); /* clean dcache */
2176 xscale_send_u32(target, xscale->cache_clean_address);
2177 xscale_send_u32(target, 0x51); /* invalidate dcache */
2178 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2179 }
2180
2181 return ERROR_OK;
2182 }
2183
2184 static int xscale_add_breakpoint(struct target *target,
2185 struct breakpoint *breakpoint)
2186 {
2187 struct xscale_common *xscale = target_to_xscale(target);
2188
2189 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2190 {
2191 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2192 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2193 }
2194
2195 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2196 {
2197 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2198 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2199 }
2200
2201 if (breakpoint->type == BKPT_HARD)
2202 {
2203 xscale->ibcr_available--;
2204 }
2205
2206 return ERROR_OK;
2207 }
2208
2209 static int xscale_unset_breakpoint(struct target *target,
2210 struct breakpoint *breakpoint)
2211 {
2212 int retval;
2213 struct xscale_common *xscale = target_to_xscale(target);
2214
2215 if (target->state != TARGET_HALTED)
2216 {
2217 LOG_WARNING("target not halted");
2218 return ERROR_TARGET_NOT_HALTED;
2219 }
2220
2221 if (!breakpoint->set)
2222 {
2223 LOG_WARNING("breakpoint not set");
2224 return ERROR_OK;
2225 }
2226
2227 if (breakpoint->type == BKPT_HARD)
2228 {
2229 if (breakpoint->set == 1)
2230 {
2231 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2232 xscale->ibcr0_used = 0;
2233 }
2234 else if (breakpoint->set == 2)
2235 {
2236 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2237 xscale->ibcr1_used = 0;
2238 }
2239 breakpoint->set = 0;
2240 }
2241 else
2242 {
2243 /* restore original instruction (kept in target endianness) */
2244 if (breakpoint->length == 4)
2245 {
2246 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2247 {
2248 return retval;
2249 }
2250 }
2251 else
2252 {
2253 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2254 {
2255 return retval;
2256 }
2257 }
2258 breakpoint->set = 0;
2259
2260 xscale_send_u32(target, 0x50); /* clean dcache */
2261 xscale_send_u32(target, xscale->cache_clean_address);
2262 xscale_send_u32(target, 0x51); /* invalidate dcache */
2263 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2264 }
2265
2266 return ERROR_OK;
2267 }
2268
2269 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2270 {
2271 struct xscale_common *xscale = target_to_xscale(target);
2272
2273 if (target->state != TARGET_HALTED)
2274 {
2275 LOG_WARNING("target not halted");
2276 return ERROR_TARGET_NOT_HALTED;
2277 }
2278
2279 if (breakpoint->set)
2280 {
2281 xscale_unset_breakpoint(target, breakpoint);
2282 }
2283
2284 if (breakpoint->type == BKPT_HARD)
2285 xscale->ibcr_available++;
2286
2287 return ERROR_OK;
2288 }
2289
2290 static int xscale_set_watchpoint(struct target *target,
2291 struct watchpoint *watchpoint)
2292 {
2293 struct xscale_common *xscale = target_to_xscale(target);
2294 uint32_t enable = 0;
2295 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2296 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2297
2298 if (target->state != TARGET_HALTED)
2299 {
2300 LOG_WARNING("target not halted");
2301 return ERROR_TARGET_NOT_HALTED;
2302 }
2303
2304 switch (watchpoint->rw)
2305 {
2306 case WPT_READ:
2307 enable = 0x3;
2308 break;
2309 case WPT_ACCESS:
2310 enable = 0x2;
2311 break;
2312 case WPT_WRITE:
2313 enable = 0x1;
2314 break;
2315 default:
2316 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2317 }
2318
2319 /* For watchpoint across more than one word, both DBR registers must
2320 be enlisted, with the second used as a mask. */
2321 if (watchpoint->length > 4)
2322 {
2323 if (xscale->dbr0_used || xscale->dbr1_used)
2324 {
2325 LOG_ERROR("BUG: sufficient hardware comparators unavailable");
2326 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2327 }
2328
2329 /* Write mask value to DBR1, based on the length argument.
2330 * Address bits ignored by the comparator are those set in mask. */
2331 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
2332 watchpoint->length - 1);
2333 xscale->dbr1_used = 1;
2334 enable |= 0x100; /* DBCON[M] */
2335 }
2336
2337 if (!xscale->dbr0_used)
2338 {
2339 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2340 dbcon_value |= enable;
2341 xscale_set_reg_u32(dbcon, dbcon_value);
2342 watchpoint->set = 1;
2343 xscale->dbr0_used = 1;
2344 }
2345 else if (!xscale->dbr1_used)
2346 {
2347 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2348 dbcon_value |= enable << 2;
2349 xscale_set_reg_u32(dbcon, dbcon_value);
2350 watchpoint->set = 2;
2351 xscale->dbr1_used = 1;
2352 }
2353 else
2354 {
2355 LOG_ERROR("BUG: no hardware comparator available");
2356 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2357 }
2358
2359 return ERROR_OK;
2360 }
2361
2362 static int xscale_add_watchpoint(struct target *target,
2363 struct watchpoint *watchpoint)
2364 {
2365 struct xscale_common *xscale = target_to_xscale(target);
2366
2367 if (xscale->dbr_available < 1)
2368 {
2369 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2370 }
2371
2372 if (watchpoint->value)
2373 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2374
2375 /* check that length is a power of two */
2376 for (uint32_t len = watchpoint->length; len != 1; len /= 2)
2377 {
2378 if (len % 2)
2379 {
2380 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2381 return ERROR_COMMAND_ARGUMENT_INVALID;
2382 }
2383 }
2384
2385 if (watchpoint->length == 4) /* single word watchpoint */
2386 {
2387 xscale->dbr_available--; /* one DBR reg used */
2388 return ERROR_OK;
2389 }
2390
2391 /* watchpoints across multiple words require both DBR registers */
2392 if (xscale->dbr_available < 2)
2393 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2394
2395 xscale->dbr_available = 0;
2396 return ERROR_OK;
2397 }
2398
2399 static int xscale_unset_watchpoint(struct target *target,
2400 struct watchpoint *watchpoint)
2401 {
2402 struct xscale_common *xscale = target_to_xscale(target);
2403 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2404 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2405
2406 if (target->state != TARGET_HALTED)
2407 {
2408 LOG_WARNING("target not halted");
2409 return ERROR_TARGET_NOT_HALTED;
2410 }
2411
2412 if (!watchpoint->set)
2413 {
2414 LOG_WARNING("breakpoint not set");
2415 return ERROR_OK;
2416 }
2417
2418 if (watchpoint->set == 1)
2419 {
2420 if (watchpoint->length > 4)
2421 {
2422 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2423 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2424 }
2425 else
2426 dbcon_value &= ~0x3;
2427
2428 xscale_set_reg_u32(dbcon, dbcon_value);
2429 xscale->dbr0_used = 0;
2430 }
2431 else if (watchpoint->set == 2)
2432 {
2433 dbcon_value &= ~0xc;
2434 xscale_set_reg_u32(dbcon, dbcon_value);
2435 xscale->dbr1_used = 0;
2436 }
2437 watchpoint->set = 0;
2438
2439 return ERROR_OK;
2440 }
2441
2442 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2443 {
2444 struct xscale_common *xscale = target_to_xscale(target);
2445
2446 if (target->state != TARGET_HALTED)
2447 {
2448 LOG_WARNING("target not halted");
2449 return ERROR_TARGET_NOT_HALTED;
2450 }
2451
2452 if (watchpoint->set)
2453 {
2454 xscale_unset_watchpoint(target, watchpoint);
2455 }
2456
2457 if (watchpoint->length > 4)
2458 xscale->dbr_available++; /* both DBR regs now available */
2459
2460 xscale->dbr_available++;
2461
2462 return ERROR_OK;
2463 }
2464
2465 static int xscale_get_reg(struct reg *reg)
2466 {
2467 struct xscale_reg *arch_info = reg->arch_info;
2468 struct target *target = arch_info->target;
2469 struct xscale_common *xscale = target_to_xscale(target);
2470
2471 /* DCSR, TX and RX are accessible via JTAG */
2472 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2473 {
2474 return xscale_read_dcsr(arch_info->target);
2475 }
2476 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2477 {
2478 /* 1 = consume register content */
2479 return xscale_read_tx(arch_info->target, 1);
2480 }
2481 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2482 {
2483 /* can't read from RX register (host -> debug handler) */
2484 return ERROR_OK;
2485 }
2486 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2487 {
2488 /* can't (explicitly) read from TXRXCTRL register */
2489 return ERROR_OK;
2490 }
2491 else /* Other DBG registers have to be transfered by the debug handler */
2492 {
2493 /* send CP read request (command 0x40) */
2494 xscale_send_u32(target, 0x40);
2495
2496 /* send CP register number */
2497 xscale_send_u32(target, arch_info->dbg_handler_number);
2498
2499 /* read register value */
2500 xscale_read_tx(target, 1);
2501 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2502
2503 reg->dirty = 0;
2504 reg->valid = 1;
2505 }
2506
2507 return ERROR_OK;
2508 }
2509
2510 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2511 {
2512 struct xscale_reg *arch_info = reg->arch_info;
2513 struct target *target = arch_info->target;
2514 struct xscale_common *xscale = target_to_xscale(target);
2515 uint32_t value = buf_get_u32(buf, 0, 32);
2516
2517 /* DCSR, TX and RX are accessible via JTAG */
2518 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2519 {
2520 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2521 return xscale_write_dcsr(arch_info->target, -1, -1);
2522 }
2523 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2524 {
2525 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2526 return xscale_write_rx(arch_info->target);
2527 }
2528 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2529 {
2530 /* can't write to TX register (debug-handler -> host) */
2531 return ERROR_OK;
2532 }
2533 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2534 {
2535 /* can't (explicitly) write to TXRXCTRL register */
2536 return ERROR_OK;
2537 }
2538 else /* Other DBG registers have to be transfered by the debug handler */
2539 {
2540 /* send CP write request (command 0x41) */
2541 xscale_send_u32(target, 0x41);
2542
2543 /* send CP register number */
2544 xscale_send_u32(target, arch_info->dbg_handler_number);
2545
2546 /* send CP register value */
2547 xscale_send_u32(target, value);
2548 buf_set_u32(reg->value, 0, 32, value);
2549 }
2550
2551 return ERROR_OK;
2552 }
2553
2554 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2555 {
2556 struct xscale_common *xscale = target_to_xscale(target);
2557 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2558 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2559
2560 /* send CP write request (command 0x41) */
2561 xscale_send_u32(target, 0x41);
2562
2563 /* send CP register number */
2564 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2565
2566 /* send CP register value */
2567 xscale_send_u32(target, value);
2568 buf_set_u32(dcsr->value, 0, 32, value);
2569
2570 return ERROR_OK;
2571 }
2572
2573 static int xscale_read_trace(struct target *target)
2574 {
2575 struct xscale_common *xscale = target_to_xscale(target);
2576 struct arm *armv4_5 = &xscale->armv4_5_common;
2577 struct xscale_trace_data **trace_data_p;
2578
2579 /* 258 words from debug handler
2580 * 256 trace buffer entries
2581 * 2 checkpoint addresses
2582 */
2583 uint32_t trace_buffer[258];
2584 int is_address[256];
2585 int i, j;
2586 unsigned int num_checkpoints = 0;
2587
2588 if (target->state != TARGET_HALTED)
2589 {
2590 LOG_WARNING("target must be stopped to read trace data");
2591 return ERROR_TARGET_NOT_HALTED;
2592 }
2593
2594 /* send read trace buffer command (command 0x61) */
2595 xscale_send_u32(target, 0x61);
2596
2597 /* receive trace buffer content */
2598 xscale_receive(target, trace_buffer, 258);
2599
2600 /* parse buffer backwards to identify address entries */
2601 for (i = 255; i >= 0; i--)
2602 {
2603 /* also count number of checkpointed entries */
2604 if ((trace_buffer[i] & 0xe0) == 0xc0)
2605 num_checkpoints++;
2606
2607 is_address[i] = 0;
2608 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2609 ((trace_buffer[i] & 0xf0) == 0xd0))
2610 {
2611 if (i > 0)
2612 is_address[--i] = 1;
2613 if (i > 0)
2614 is_address[--i] = 1;
2615 if (i > 0)
2616 is_address[--i] = 1;
2617 if (i > 0)
2618 is_address[--i] = 1;
2619 }
2620 }
2621
2622
2623 /* search first non-zero entry that is not part of an address */
2624 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2625 ;
2626
2627 if (j == 256)
2628 {
2629 LOG_DEBUG("no trace data collected");
2630 return ERROR_XSCALE_NO_TRACE_DATA;
2631 }
2632
2633 /* account for possible partial address at buffer start (wrap mode only) */
2634 if (is_address[0])
2635 { /* first entry is address; complete set of 4? */
2636 i = 1;
2637 while (i < 4)
2638 if (!is_address[i++])
2639 break;
2640 if (i < 4)
2641 j += i; /* partial address; can't use it */
2642 }
2643
2644 /* if first valid entry is indirect branch, can't use that either (no address) */
2645 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2646 j++;
2647
2648 /* walk linked list to terminating entry */
2649 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2650 ;
2651
2652 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2653 (*trace_data_p)->next = NULL;
2654 (*trace_data_p)->chkpt0 = trace_buffer[256];
2655 (*trace_data_p)->chkpt1 = trace_buffer[257];
2656 (*trace_data_p)->last_instruction =
2657 buf_get_u32(armv4_5->pc->value, 0, 32);
2658 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2659 (*trace_data_p)->depth = 256 - j;
2660 (*trace_data_p)->num_checkpoints = num_checkpoints;
2661
2662 for (i = j; i < 256; i++)
2663 {
2664 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2665 if (is_address[i])
2666 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2667 else
2668 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2669 }
2670
2671 return ERROR_OK;
2672 }
2673
2674 static int xscale_read_instruction(struct target *target, uint32_t pc,
2675 struct arm_instruction *instruction)
2676 {
2677 struct xscale_common *const xscale = target_to_xscale(target);
2678 int i;
2679 int section = -1;
2680 size_t size_read;
2681 uint32_t opcode;
2682 int retval;
2683
2684 if (!xscale->trace.image)
2685 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2686
2687 /* search for the section the current instruction belongs to */
2688 for (i = 0; i < xscale->trace.image->num_sections; i++)
2689 {
2690 if ((xscale->trace.image->sections[i].base_address <= pc) &&
2691 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > pc))
2692 {
2693 section = i;
2694 break;
2695 }
2696 }
2697
2698 if (section == -1)
2699 {
2700 /* current instruction couldn't be found in the image */
2701 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2702 }
2703
2704 if (xscale->trace.core_state == ARM_STATE_ARM)
2705 {
2706 uint8_t buf[4];
2707 if ((retval = image_read_section(xscale->trace.image, section,
2708 pc - xscale->trace.image->sections[section].base_address,
2709 4, buf, &size_read)) != ERROR_OK)
2710 {
2711 LOG_ERROR("error while reading instruction: %i", retval);
2712 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2713 }
2714 opcode = target_buffer_get_u32(target, buf);
2715 arm_evaluate_opcode(opcode, pc, instruction);
2716 }
2717 else if (xscale->trace.core_state == ARM_STATE_THUMB)
2718 {
2719 uint8_t buf[2];
2720 if ((retval = image_read_section(xscale->trace.image, section,
2721 pc - xscale->trace.image->sections[section].base_address,
2722 2, buf, &size_read)) != ERROR_OK)
2723 {
2724 LOG_ERROR("error while reading instruction: %i", retval);
2725 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2726 }
2727 opcode = target_buffer_get_u16(target, buf);
2728 thumb_evaluate_opcode(opcode, pc, instruction);
2729 }
2730 else
2731 {
2732 LOG_ERROR("BUG: unknown core state encountered");
2733 exit(-1);
2734 }
2735
2736 return ERROR_OK;
2737 }
2738
2739 /* Extract address encoded into trace data.
2740 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2741 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2742 int i, uint32_t *target)
2743 {
2744 /* if there are less than four entries prior to the indirect branch message
2745 * we can't extract the address */
2746 if (i < 4)
2747 *target = 0;
2748 else
2749 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2750 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2751 }
2752
2753 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2754 struct arm_instruction *instruction,
2755 struct command_context *cmd_ctx)
2756 {
2757 int retval = xscale_read_instruction(target, pc, instruction);
2758 if (retval == ERROR_OK)
2759 command_print(cmd_ctx, "%s", instruction->text);
2760 else
2761 command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2762 }
2763
/* Decode all collected trace buffers and print the reconstructed
 * instruction stream to the command context.
 *
 * Each trace entry is a "message byte": the upper nybble encodes the
 * message type (exception, direct/indirect branch, checkpointed
 * variants, roll-over) and the lower nybble an instruction count.
 * Indirect-branch targets come from the four address bytes preceding
 * the message; direct-branch targets are recovered by disassembling
 * the branch instruction from the loaded image; checkpoint registers
 * supply starting addresses when nothing else is known yet.
 *
 * Returns ERROR_OK; problems are reported via command_print/LOG_WARNING
 * rather than as errors. */
static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct xscale_trace_data *trace_data = xscale->trace.data;
	int i, retval;
	uint32_t breakpoint_pc;
	struct arm_instruction instruction;
	uint32_t current_pc = 0; /* initialized when address determined */

	if (!xscale->trace.image)
		LOG_WARNING("No trace image loaded; use 'xscale trace_image'");

	/* loop for each trace buffer that was loaded from target */
	while (trace_data)
	{
		int chkpt = 0; /* incremented as checkpointed entries found */
		int j;

		/* FIXME: set this to correct mode when trace buffer is first enabled */
		xscale->trace.core_state = ARM_STATE_ARM;

		/* loop for each entry in this trace buffer */
		for (i = 0; i < trace_data->depth; i++)
		{
			int exception = 0;
			uint32_t chkpt_reg = 0x0;
			uint32_t branch_target = 0;
			int count;

			/* trace entry type is upper nybble of 'message byte' */
			int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;

			/* Target addresses of indirect branches are written into buffer
			 * before the message byte representing the branch. Skip past it */
			if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
				continue;

			switch (trace_msg_type)
			{
				case 0: /* Exceptions */
				case 1:
				case 2:
				case 3:
				case 4:
				case 5:
				case 6:
				case 7:
					/* exception number is bits 4..6 of the message byte */
					exception = (trace_data->entries[i].data & 0x70) >> 4;

					/* FIXME: vector table may be at ffff0000 */
					/* derive the (low) vector address from the exception number */
					branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
					break;

				case 8: /* Direct Branch */
					break;

				case 9: /* Indirect Branch */
					xscale_branch_address(trace_data, i, &branch_target);
					break;

				case 13: /* Checkpointed Indirect Branch */
					xscale_branch_address(trace_data, i, &branch_target);
					if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
						chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
					else
						chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */

					chkpt++;
					break;

				case 12: /* Checkpointed Direct Branch */
					if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
						chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
					else
						chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */

					/* if no current_pc, checkpoint will be starting point */
					if (current_pc == 0)
						branch_target = chkpt_reg;

					chkpt++;
					break;

				case 15: /* Roll-over */
					break;

				default: /* Reserved */
					LOG_WARNING("trace is suspect: invalid trace message byte");
					continue;

			}

			/* If we don't have the current_pc yet, but we did get the branch target
			 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
			 * then we can start displaying instructions at the next iteration, with
			 * branch_target as the starting point.
			 */
			if (current_pc == 0)
			{
				current_pc = branch_target; /* remains 0 unless branch_target obtained */
				continue;
			}

			/* We have current_pc. Read and display the instructions from the image.
			 * First, display count instructions (lower nybble of message byte). */
			count = trace_data->entries[i].data & 0x0f;
			for (j = 0; j < count; j++)
			{
				xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
				current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
			}

			/* An additional instruction is implicitly added to count for
			 * rollover and some exceptions: undef, swi, prefetch abort. */
			if ((trace_msg_type == 15) || (exception > 0 && exception < 4))
			{
				xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
				current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
			}

			if (trace_msg_type == 15) /* rollover */
				continue;

			if (exception)
			{
				command_print(cmd_ctx, "--- exception %i ---", exception);
				continue;
			}

			/* not exception or rollover; next instruction is a branch and is
			 * not included in the count */
			xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);

			/* for direct branches, extract branch destination from instruction */
			if ((trace_msg_type == 8) || (trace_msg_type == 12))
			{
				retval = xscale_read_instruction(target, current_pc, &instruction);
				if (retval == ERROR_OK)
					current_pc = instruction.info.b_bl_bx_blx.target_address;
				else
					current_pc = 0;	/* branch destination unknown */

				/* direct branch w/ checkpoint; can also get from checkpoint reg */
				if (trace_msg_type == 12)
				{
					if (current_pc == 0)
						current_pc = chkpt_reg;
					else if (current_pc != chkpt_reg)  /* sanity check */
						LOG_WARNING("trace is suspect: checkpoint register "
							"inconsistent with adddress from image");
				}

				if (current_pc == 0)
					command_print(cmd_ctx, "address unknown");

				continue;
			}

			/* indirect branch; the branch destination was read from trace buffer */
			if ((trace_msg_type == 9) || (trace_msg_type == 13))
			{
				current_pc = branch_target;

				/* sanity check (checkpoint reg is redundant) */
				if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
					LOG_WARNING("trace is suspect: checkpoint register "
						"inconsistent with address from trace buffer");
			}

		} /* END: for (i = 0; i < trace_data->depth; i++) */

		breakpoint_pc = trace_data->last_instruction;	/* used below */
		trace_data = trace_data->next;

	} /* END: while (trace_data) */

	/* Finally... display all instructions up to the value of the pc when the
	 * debug break occurred (saved when trace data was collected from target).
	 * This is necessary because the trace only records execution branches and 16
	 * consecutive instructions (rollovers), so last few typically missed.
	 */
	if (current_pc == 0)
		return ERROR_OK;	/* current_pc was never found */

	/* how many instructions remaining? */
	int gap_count = (breakpoint_pc - current_pc) /
		(xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);

	/* should never be negative or over 16, but verify */
	if (gap_count < 0 || gap_count > 16)
	{
		LOG_WARNING("trace is suspect: excessive gap at end of trace");
		return ERROR_OK;	/* bail; large number or negative value no good */
	}

	/* display remaining instructions */
	for (i = 0; i < gap_count; i++)
	{
		xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
		current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
	}

	return ERROR_OK;
}
2968
/* accessor pair wired into every entry of the XScale register cache
 * by xscale_build_reg_cache() */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2973
2974 static void xscale_build_reg_cache(struct target *target)
2975 {
2976 struct xscale_common *xscale = target_to_xscale(target);
2977 struct arm *armv4_5 = &xscale->armv4_5_common;
2978 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2979 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2980 int i;
2981 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2982
2983 (*cache_p) = arm_build_reg_cache(target, armv4_5);
2984
2985 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2986 cache_p = &(*cache_p)->next;
2987
2988 /* fill in values for the xscale reg cache */
2989 (*cache_p)->name = "XScale registers";
2990 (*cache_p)->next = NULL;
2991 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2992 (*cache_p)->num_regs = num_regs;
2993
2994 for (i = 0; i < num_regs; i++)
2995 {
2996 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2997 (*cache_p)->reg_list[i].value = calloc(4, 1);
2998 (*cache_p)->reg_list[i].dirty = 0;
2999 (*cache_p)->reg_list[i].valid = 0;
3000 (*cache_p)->reg_list[i].size = 32;
3001 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
3002 (*cache_p)->reg_list[i].type = &xscale_reg_type;
3003 arch_info[i] = xscale_reg_arch_info[i];
3004 arch_info[i].target = target;
3005 }
3006
3007 xscale->reg_cache = (*cache_p);
3008 }
3009
/* target_type 'init_target' hook: invoked once after target
 * configuration; only needs to populate the register caches. */
static int xscale_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
3016
/* One-time initialization of the per-target XScale state: JTAG IR
 * length (derived from the variant name), debug handler address,
 * exception vector stubs, breakpoint/watchpoint comparator
 * bookkeeping, trace state and the ARMv4/5 MMU/cache hooks.
 * Always returns ERROR_OK. */
static int xscale_init_arch_info(struct target *target,
		struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	struct arm *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specific data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* a variant param ... */
	if (variant) {
		int ir_length = 0;

		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else if (strcmp(variant, "pxa3xx") == 0)
			ir_length = 11;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		/* the variant implies the IR length; override a mismatched config */
		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* PXA3xx shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler
	 * (B-instruction offsets from the low/high reset vectors to the
	 * handler entry, in words, accounting for the 8-byte prefetch) */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* remaining vectors default to branch-to-self until configured */
	for (i = 1; i <= 7; i++)
	{
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two instruction-breakpoint and two data-breakpoint comparators */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
			target_name(target));

	/* opcodes used when patching software breakpoints */
	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	xscale->trace.capture_status = TRACE_IDLE;
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.buffer_enabled = 0;
	xscale->trace.buffer_fill = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	arm_init_arch_info(target, armv4_5);

	/* hook up the generic ARMv4/5 MMU/cache layer */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
3132
3133 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3134 {
3135 struct xscale_common *xscale;
3136
3137 if (sizeof xscale_debug_handler - 1 > 0x800) {
3138 LOG_ERROR("debug_handler.bin: larger than 2kb");
3139 return ERROR_FAIL;
3140 }
3141
3142 xscale = calloc(1, sizeof(*xscale));
3143 if (!xscale)
3144 return ERROR_FAIL;
3145
3146 return xscale_init_arch_info(target, xscale, target->tap,
3147 target->variant);
3148 }
3149
3150 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3151 {
3152 struct target *target = NULL;
3153 struct xscale_common *xscale;
3154 int retval;
3155 uint32_t handler_address;
3156
3157 if (CMD_ARGC < 2)
3158 {
3159 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3160 return ERROR_OK;
3161 }
3162
3163 if ((target = get_target(CMD_ARGV[0])) == NULL)
3164 {
3165 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3166 return ERROR_FAIL;
3167 }
3168
3169 xscale = target_to_xscale(target);
3170 retval = xscale_verify_pointer(CMD_CTX, xscale);
3171 if (retval != ERROR_OK)
3172 return retval;
3173
3174 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3175
3176 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3177 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3178 {
3179 xscale->handler_address = handler_address;
3180 }
3181 else
3182 {
3183 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3184 return ERROR_FAIL;
3185 }
3186
3187 return ERROR_OK;
3188 }
3189
3190 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3191 {
3192 struct target *target = NULL;
3193 struct xscale_common *xscale;
3194 int retval;
3195 uint32_t cache_clean_address;
3196
3197 if (CMD_ARGC < 2)
3198 {
3199 return ERROR_COMMAND_SYNTAX_ERROR;
3200 }
3201
3202 target = get_target(CMD_ARGV[0]);
3203 if (target == NULL)
3204 {
3205 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3206 return ERROR_FAIL;
3207 }
3208 xscale = target_to_xscale(target);
3209 retval = xscale_verify_pointer(CMD_CTX, xscale);
3210 if (retval != ERROR_OK)
3211 return retval;
3212
3213 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3214
3215 if (cache_clean_address & 0xffff)
3216 {
3217 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3218 }
3219 else
3220 {
3221 xscale->cache_clean_address = cache_clean_address;
3222 }
3223
3224 return ERROR_OK;
3225 }
3226
3227 COMMAND_HANDLER(xscale_handle_cache_info_command)
3228 {
3229 struct target *target = get_current_target(CMD_CTX);
3230 struct xscale_common *xscale = target_to_xscale(target);
3231 int retval;
3232
3233 retval = xscale_verify_pointer(CMD_CTX, xscale);
3234 if (retval != ERROR_OK)
3235 return retval;
3236
3237 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3238 }
3239
3240 static int xscale_virt2phys(struct target *target,
3241 uint32_t virtual, uint32_t *physical)
3242 {
3243 struct xscale_common *xscale = target_to_xscale(target);
3244 uint32_t cb;
3245
3246 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3247 LOG_ERROR(xscale_not);
3248 return ERROR_TARGET_INVALID;
3249 }
3250
3251 uint32_t ret;
3252 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
3253 virtual, &cb, &ret);
3254 if (retval != ERROR_OK)
3255 return retval;
3256 *physical = ret;
3257 return ERROR_OK;
3258 }
3259
3260 static int xscale_mmu(struct target *target, int *enabled)
3261 {
3262 struct xscale_common *xscale = target_to_xscale(target);
3263
3264 if (target->state != TARGET_HALTED)
3265 {
3266 LOG_ERROR("Target not halted");
3267 return ERROR_TARGET_INVALID;
3268 }
3269 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3270 return ERROR_OK;
3271 }
3272
3273 COMMAND_HANDLER(xscale_handle_mmu_command)
3274 {
3275 struct target *target = get_current_target(CMD_CTX);
3276 struct xscale_common *xscale = target_to_xscale(target);
3277 int retval;
3278
3279 retval = xscale_verify_pointer(CMD_CTX, xscale);
3280 if (retval != ERROR_OK)
3281 return retval;
3282
3283 if (target->state != TARGET_HALTED)
3284 {
3285 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3286 return ERROR_OK;
3287 }
3288
3289 if (CMD_ARGC >= 1)
3290 {
3291 bool enable;
3292 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3293 if (enable)
3294 xscale_enable_mmu_caches(target, 1, 0, 0);
3295 else
3296 xscale_disable_mmu_caches(target, 1, 0, 0);
3297 xscale->armv4_5_mmu.mmu_enabled = enable;
3298 }
3299
3300 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3301
3302 return ERROR_OK;
3303 }
3304
3305 COMMAND_HANDLER(xscale_handle_idcache_command)
3306 {
3307 struct target *target = get_current_target(CMD_CTX);
3308 struct xscale_common *xscale = target_to_xscale(target);
3309
3310 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3311 if (retval != ERROR_OK)
3312 return retval;
3313
3314 if (target->state != TARGET_HALTED)
3315 {
3316 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3317 return ERROR_OK;
3318 }
3319
3320 bool icache = false;
3321 if (strcmp(CMD_NAME, "icache") == 0)
3322 icache = true;
3323 if (CMD_ARGC >= 1)
3324 {
3325 bool enable;
3326 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3327 if (icache) {
3328 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3329 if (enable)
3330 xscale_enable_mmu_caches(target, 0, 0, 1);
3331 else
3332 xscale_disable_mmu_caches(target, 0, 0, 1);
3333 } else {
3334 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3335 if (enable)
3336 xscale_enable_mmu_caches(target, 0, 1, 0);
3337 else
3338 xscale_disable_mmu_caches(target, 0, 1, 0);
3339 }
3340 }
3341
3342 bool enabled = icache ?
3343 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3344 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3345 const char *msg = enabled ? "enabled" : "disabled";
3346 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3347
3348 return ERROR_OK;
3349 }
3350
3351 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3352 {
3353 struct target *target = get_current_target(CMD_CTX);
3354 struct xscale_common *xscale = target_to_xscale(target);
3355 int retval;
3356
3357 retval = xscale_verify_pointer(CMD_CTX, xscale);
3358 if (retval != ERROR_OK)
3359 return retval;
3360
3361 if (CMD_ARGC < 1)
3362 {
3363 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3364 }
3365 else
3366 {
3367 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3368 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3369 xscale_write_dcsr(target, -1, -1);
3370 }
3371
3372 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3373
3374 return ERROR_OK;
3375 }
3376
3377
3378 COMMAND_HANDLER(xscale_handle_vector_table_command)
3379 {
3380 struct target *target = get_current_target(CMD_CTX);
3381 struct xscale_common *xscale = target_to_xscale(target);
3382 int err = 0;
3383 int retval;
3384
3385 retval = xscale_verify_pointer(CMD_CTX, xscale);
3386 if (retval != ERROR_OK)
3387 return retval;
3388
3389 if (CMD_ARGC == 0) /* print current settings */
3390 {
3391 int idx;
3392
3393 command_print(CMD_CTX, "active user-set static vectors:");
3394 for (idx = 1; idx < 8; idx++)
3395 if (xscale->static_low_vectors_set & (1 << idx))
3396 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3397 for (idx = 1; idx < 8; idx++)
3398 if (xscale->static_high_vectors_set & (1 << idx))
3399 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3400 return ERROR_OK;
3401 }
3402
3403 if (CMD_ARGC != 3)
3404 err = 1;
3405 else
3406 {
3407 int idx;
3408 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3409 uint32_t vec;
3410 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3411
3412 if (idx < 1 || idx >= 8)
3413 err = 1;
3414
3415 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3416 {
3417 xscale->static_low_vectors_set |= (1<<idx);
3418 xscale->static_low_vectors[idx] = vec;
3419 }
3420 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3421 {
3422 xscale->static_high_vectors_set |= (1<<idx);
3423 xscale->static_high_vectors[idx] = vec;
3424 }
3425 else
3426 err = 1;
3427 }
3428
3429 if (err)
3430 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3431
3432 return ERROR_OK;
3433 }
3434
3435
3436 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3437 {
3438 struct target *target = get_current_target(CMD_CTX);
3439 struct xscale_common *xscale = target_to_xscale(target);
3440 uint32_t dcsr_value;
3441 int retval;
3442
3443 retval = xscale_verify_pointer(CMD_CTX, xscale);
3444 if (retval != ERROR_OK)
3445 return retval;
3446
3447 if (target->state != TARGET_HALTED)
3448 {
3449 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3450 return ERROR_OK;
3451 }
3452
3453 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3454 {
3455 struct xscale_trace_data *td, *next_td;
3456 xscale->trace.buffer_enabled = 1;
3457
3458 /* free old trace data */
3459 td = xscale->trace.data;
3460 while (td)
3461 {
3462 next_td = td->next;
3463
3464 if (td->entries)
3465 free(td->entries);
3466 free(td);
3467 td = next_td;
3468 }
3469 xscale->trace.data = NULL;
3470 }
3471 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3472 {
3473 xscale->trace.buffer_enabled = 0;
3474 }
3475
3476 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3477 {
3478 uint32_t fill = 1;
3479 if (CMD_ARGC >= 3)
3480 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3481 xscale->trace.buffer_fill = fill;
3482 }
3483 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3484 {
3485 xscale->trace.buffer_fill = -1;
3486 }
3487
3488 command_print(CMD_CTX, "trace buffer %s (%s)",
3489 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3490 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3491
3492 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3493 if (xscale->trace.buffer_fill >= 0)
3494 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3495 else
3496 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3497
3498 return ERROR_OK;
3499 }
3500
3501 COMMAND_HANDLER(xscale_handle_trace_image_command)
3502 {
3503 struct target *target = get_current_target(CMD_CTX);
3504 struct xscale_common *xscale = target_to_xscale(target);
3505 int retval;
3506
3507 if (CMD_ARGC < 1)
3508 {
3509 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3510 return ERROR_OK;
3511 }
3512
3513 retval = xscale_verify_pointer(CMD_CTX, xscale);
3514 if (retval != ERROR_OK)
3515 return retval;
3516
3517 if (xscale->trace.image)
3518 {
3519 image_close(xscale->trace.image);
3520 free(xscale->trace.image);
3521 command_print(CMD_CTX, "previously loaded image found and closed");
3522 }
3523
3524 xscale->trace.image = malloc(sizeof(struct image));
3525 xscale->trace.image->base_address_set = 0;
3526 xscale->trace.image->start_address_set = 0;
3527
3528 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3529 if (CMD_ARGC >= 2)
3530 {
3531 xscale->trace.image->base_address_set = 1;
3532 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3533 }
3534 else
3535 {
3536 xscale->trace.image->base_address_set = 0;
3537 }
3538
3539 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3540 {
3541 free(xscale->trace.image);
3542 xscale->trace.image = NULL;
3543 return ERROR_OK;
3544 }
3545
3546 return ERROR_OK;
3547 }
3548
3549 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3550 {
3551 struct target *target = get_current_target(CMD_CTX);
3552 struct xscale_common *xscale = target_to_xscale(target);
3553 struct xscale_trace_data *trace_data;
3554 struct fileio file;
3555 int retval;
3556
3557 retval = xscale_verify_pointer(CMD_CTX, xscale);
3558 if (retval != ERROR_OK)
3559 return retval;
3560
3561 if (target->state != TARGET_HALTED)
3562 {
3563 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3564 return ERROR_OK;
3565 }
3566
3567 if (CMD_ARGC < 1)
3568 {
3569 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3570 return ERROR_OK;
3571 }
3572
3573 trace_data = xscale->trace.data;
3574
3575 if (!trace_data)
3576 {
3577 command_print(CMD_CTX, "no trace data collected");
3578 return ERROR_OK;
3579 }
3580
3581 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3582 {
3583 return ERROR_OK;
3584 }
3585
3586 while (trace_data)
3587 {
3588 int i;
3589
3590 fileio_write_u32(&file, trace_data->chkpt0);
3591 fileio_write_u32(&file, trace_data->chkpt1);
3592 fileio_write_u32(&file, trace_data->last_instruction);
3593 fileio_write_u32(&file, trace_data->depth);
3594
3595 for (i = 0; i < trace_data->depth; i++)
3596 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3597
3598 trace_data = trace_data->next;
3599 }
3600
3601 fileio_close(&file);
3602
3603 return ERROR_OK;
3604 }
3605
3606 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3607 {
3608 struct target *target = get_current_target(CMD_CTX);
3609 struct xscale_common *xscale = target_to_xscale(target);
3610 int retval;
3611
3612 retval = xscale_verify_pointer(CMD_CTX, xscale);
3613 if (retval != ERROR_OK)
3614 return retval;
3615
3616 xscale_analyze_trace(target, CMD_CTX);
3617
3618 return ERROR_OK;
3619 }
3620
/*
 * Read or write a coprocessor 15 register via the debug handler.
 *
 * Usage: xscale cp15 register [value]
 *
 * With one argument the register is read and printed; with two it is
 * written.  The numeric argument is the CP15 register number, which is
 * mapped onto the corresponding entry of the OpenOCD register cache.
 */
COMMAND_HANDLER(xscale_handle_cp15)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}
	uint32_t reg_no = 0;
	struct reg *reg = NULL;
	if (CMD_ARGC > 0)
	{
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
		/*translate from xscale cp15 register no to openocd register*/
		switch (reg_no)
		{
		case 0:
			reg_no = XSCALE_MAINID;
			break;
		case 1:
			reg_no = XSCALE_CTRL;
			break;
		case 2:
			reg_no = XSCALE_TTB;
			break;
		case 3:
			reg_no = XSCALE_DAC;
			break;
		case 5:
			reg_no = XSCALE_FSR;
			break;
		case 6:
			reg_no = XSCALE_FAR;
			break;
		case 13:
			reg_no = XSCALE_PID;
			break;
		case 15:
			reg_no = XSCALE_CPACCESS;
			break;
		default:
			command_print(CMD_CTX, "invalid register number");
			return ERROR_INVALID_ARGUMENTS;
		}
		reg = &xscale->reg_cache->reg_list[reg_no];

	}
	if (CMD_ARGC == 1)
	{
		uint32_t value;

		/* read cp15 control register */
		xscale_get_reg(reg);
		value = buf_get_u32(reg->value, 0, 32);
		command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
	}
	else if (CMD_ARGC == 2)
	{
		uint32_t value;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);

		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, reg_no);

		/* send CP register value */
		xscale_send_u32(target, value);

		/* execute cpwait to ensure outstanding operations complete */
		xscale_send_u32(target, 0x53);
	}
	else
	{
		command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
	}

	return ERROR_OK;
}
3708
/* "xscale" subcommands that require a configured, running session (EXEC mode). */
static const struct command_registration xscale_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = xscale_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about CPU caches",
	},
	{
		.name = "mmu",
		.handler = xscale_handle_mmu_command,
		.mode = COMMAND_EXEC,
		.help = "enable or disable the MMU",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "icache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display ICache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		/* shares a handler with "icache"; CMD_NAME selects the cache */
		.name = "dcache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display DCache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "vector_catch",
		.handler = xscale_handle_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "set or display 8-bit mask of vectors "
			"that should trigger debug entry",
		.usage = "[mask]",
	},
	{
		.name = "vector_table",
		.handler = xscale_handle_vector_table_command,
		.mode = COMMAND_EXEC,
		.help = "set vector table entry in mini-ICache, "
			"or display current tables",
		.usage = "[('high'|'low') index code]",
	},
	{
		.name = "trace_buffer",
		.handler = xscale_handle_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "display trace buffer status, enable or disable "
			"tracing, and optionally reconfigure trace mode",
		.usage = "['enable'|'disable' ['fill' number|'wrap']]",
	},
	{
		.name = "dump_trace",
		.handler = xscale_handle_dump_trace_command,
		.mode = COMMAND_EXEC,
		.help = "dump content of trace buffer to file",
		.usage = "filename",
	},
	{
		.name = "analyze_trace",
		.handler = xscale_handle_analyze_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "analyze content of trace buffer",
		.usage = "",
	},
	{
		.name = "trace_image",
		.handler = xscale_handle_trace_image_command,
		.mode = COMMAND_EXEC,
		.help = "load image from file to address (default 0)",
		.usage = "filename [offset [filetype]]",
	},
	{
		.name = "cp15",
		.handler = xscale_handle_cp15,
		.mode = COMMAND_EXEC,
		.help = "Read or write coprocessor 15 register.",
		.usage = "register [value]",
	},
	COMMAND_REGISTRATION_DONE
};
/* "xscale" subcommands usable in any mode (config or exec), plus the EXEC chain. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "target address",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "address",
	},
	{
		/* pull in the EXEC-only commands defined above */
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: generic ARM commands plus the "xscale" group. */
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3825
/* Target driver vtable for XScale cores; wired into OpenOCD's target layer. */
struct target_type xscale_target =
{
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,
	.bulk_write_memory = xscale_bulk_write_memory,

	/* generic ARM implementations suffice for checksum/blank checks */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)