XScale: better {read,write}_phys()
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
41
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
74
75
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
78 *
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
83 */
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
86
/* Names of the XScale-specific registers published through the OpenOCD
 * register cache.  Order must match xscale_reg_arch_info[] below and the
 * XSCALE_* register-index constants (XSCALE_MAINID == 0, ...). */
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID", /* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0", /* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX", /* 20 */
	"XSCALE_TXRXCTRL",
};
112
/* Per-register backend info, parallel to xscale_reg_list[] above.
 * Registers with an XSCALE_* number are reached through the debug
 * handler; the -1 entries are reached through dedicated JTAG data
 * registers instead (see the trailing comments). */
static const struct xscale_reg xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
	{-1, NULL}, /* TX accessed via JTAG */
	{-1, NULL}, /* RX accessed via JTAG */
	{-1, NULL}, /* TXRXCTRL implicit access via JTAG */
};
138
139 /* convenience wrapper to access XScale specific registers */
140 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
141 {
142 uint8_t buf[4];
143
144 buf_set_u32(buf, 0, 32, value);
145
146 return xscale_set_reg(reg, buf);
147 }
148
/* error message shown when an XScale command is applied to another target type */
static const char xscale_not[] = "target is not an XScale";
150
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
153 {
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
157 }
158 return ERROR_OK;
159 }
160
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
162 {
163 if (tap == NULL)
164 return ERROR_FAIL;
165
166 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
167 {
168 struct scan_field field;
169 uint8_t scratch[4];
170
171 memset(&field, 0, sizeof field);
172 field.tap = tap;
173 field.num_bits = tap->ir_length;
174 field.out_value = scratch;
175 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
176
177 jtag_add_ir_scan(1, &field, jtag_get_end_state());
178 }
179
180 return ERROR_OK;
181 }
182
/* Read the Debug Control and Status Register (DCSR) through the SELDCSR
 * JTAG data register and refresh the cached copy in the register cache.
 *
 * The SELDCSR chain is 3 + 32 + 1 bits.  The first DR scan captures the
 * 32-bit DCSR; because that scan also shifts in the 3-bit control field
 * (hold_rst / external_debug_break), a second scan writes the value we
 * just read back out again.  Check values/masks validate the fixed bits
 * the chain is documented to return.
 *
 * Returns ERROR_OK, or the error from jtag_execute_queue().
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	/* end in Pause-DR so the second (write-back) scan can follow */
	jtag_set_end_state(TAP_DRPAUSE);
	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant);

	/* control bits shifted in alongside the capture */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	/* capture DCSR straight into the register cache buffer */
	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	/* cache now holds the live DCSR value */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_set_end_state(TAP_IDLE);

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
249
250
251 static void xscale_getbuf(jtag_callback_data_t arg)
252 {
253 uint8_t *in = (uint8_t *)arg;
254 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
255 }
256
257 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
258 {
259 if (num_words == 0)
260 return ERROR_INVALID_ARGUMENTS;
261
262 struct xscale_common *xscale = target_to_xscale(target);
263 int retval = ERROR_OK;
264 tap_state_t path[3];
265 struct scan_field fields[3];
266 uint8_t *field0 = malloc(num_words * 1);
267 uint8_t field0_check_value = 0x2;
268 uint8_t field0_check_mask = 0x6;
269 uint32_t *field1 = malloc(num_words * 4);
270 uint8_t field2_check_value = 0x0;
271 uint8_t field2_check_mask = 0x1;
272 int words_done = 0;
273 int words_scheduled = 0;
274 int i;
275
276 path[0] = TAP_DRSELECT;
277 path[1] = TAP_DRCAPTURE;
278 path[2] = TAP_DRSHIFT;
279
280 memset(&fields, 0, sizeof fields);
281
282 fields[0].tap = target->tap;
283 fields[0].num_bits = 3;
284 fields[0].check_value = &field0_check_value;
285 fields[0].check_mask = &field0_check_mask;
286
287 fields[1].tap = target->tap;
288 fields[1].num_bits = 32;
289
290 fields[2].tap = target->tap;
291 fields[2].num_bits = 1;
292 fields[2].check_value = &field2_check_value;
293 fields[2].check_mask = &field2_check_mask;
294
295 jtag_set_end_state(TAP_IDLE);
296 xscale_jtag_set_instr(target->tap,
297 XSCALE_DBGTX << xscale->xscale_variant);
298 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
299
300 /* repeat until all words have been collected */
301 int attempts = 0;
302 while (words_done < num_words)
303 {
304 /* schedule reads */
305 words_scheduled = 0;
306 for (i = words_done; i < num_words; i++)
307 {
308 fields[0].in_value = &field0[i];
309
310 jtag_add_pathmove(3, path);
311
312 fields[1].in_value = (uint8_t *)(field1 + i);
313
314 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
315
316 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
317
318 words_scheduled++;
319 }
320
321 if ((retval = jtag_execute_queue()) != ERROR_OK)
322 {
323 LOG_ERROR("JTAG error while receiving data from debug handler");
324 break;
325 }
326
327 /* examine results */
328 for (i = words_done; i < num_words; i++)
329 {
330 if (!(field0[0] & 1))
331 {
332 /* move backwards if necessary */
333 int j;
334 for (j = i; j < num_words - 1; j++)
335 {
336 field0[j] = field0[j + 1];
337 field1[j] = field1[j + 1];
338 }
339 words_scheduled--;
340 }
341 }
342 if (words_scheduled == 0)
343 {
344 if (attempts++==1000)
345 {
346 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
347 retval = ERROR_TARGET_TIMEOUT;
348 break;
349 }
350 }
351
352 words_done += words_scheduled;
353 }
354
355 for (i = 0; i < num_words; i++)
356 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
357
358 free(field1);
359
360 return retval;
361 }
362
/* Read the debug handler's TX register via the DBGTX JTAG chain.
 *
 * consume != 0 clears TX_READY (Capture-DR goes straight to Shift-DR);
 * consume == 0 peeks without clearing it (detour through Pause-DR).
 * Polls for up to ~1 second when consuming and TX is empty.
 *
 * Returns ERROR_OK with the value left in the XSCALE_TX cache entry,
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when TX holds no data (non-consume
 * peek), or ERROR_TARGET_TIMEOUT on JTAG error / timeout.
 */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGTX << xscale->xscale_variant);

	/* consuming path: Capture-DR directly into Shift-DR */
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	/* non-consuming path: detour via Exit1/Pause/Exit2 keeps TX_READY set */
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	/* 3 status bits; bit 0 flags valid TX data */
	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	/* 32-bit TX word, captured straight into the register cache */
	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* one-second polling deadline */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* done unless we are consuming and there was no data yet */
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
461
/* Write the cached XSCALE_RX value to the debug handler's RX register
 * via the DBGRX JTAG chain.
 *
 * Polls (up to ~1 second) until the handler has drained the previous RX
 * word (status bit 0 clear), then scans once more with the rx_valid flag
 * set to hand the new word over.
 *
 * Returns ERROR_OK, ERROR_TARGET_TIMEOUT, or a JTAG error.
 */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant);

	memset(&fields, 0, sizeof fields);

	/* 3 status bits; captured bit 0 = RX still full */
	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	/* 32-bit payload comes from the RX register-cache entry */
	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	/* trailing bit doubles as the rx_valid flag on the final scan */
	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* one-second polling deadline */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
547
/* send count elements of size byte to the debug handler
 *
 * Streams data to RX through jtag_add_dr_out() without polling the
 * handshake bit for every word: the queue is flushed once at the end.
 * size must be 4, 2 or 1; elements are byte-swapped into host order
 * according to the target's endianness before shifting out.
 *
 * Returns ERROR_OK, ERROR_INVALID_ARGUMENTS on a bad size, or a JTAG
 * error from jtag_execute_queue().
 */
static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t t[3];
	int bits[3];
	int retval;
	int done_count = 0;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant);

	/* fixed chain layout: 3 control bits, 32 data bits, 1 rx_valid bit */
	bits[0]=3;
	t[0]=0;
	bits[1]=32;
	t[2]=1;
	bits[2]=1;
	int endianness = target->endianness;
	while (done_count++ < count)
	{
		/* pull the next element out of the byte stream in target order */
		switch (size)
		{
		case 4:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u32(buffer);
			} else
			{
				t[1]=be_to_h_u32(buffer);
			}
			break;
		case 2:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u16(buffer);
			} else
			{
				t[1]=be_to_h_u16(buffer);
			}
			break;
		case 1:
			t[1]=buffer[0];
			break;
		default:
			LOG_ERROR("BUG: size neither 4, 2 nor 1");
			return ERROR_INVALID_ARGUMENTS;
		}
		/* out-only scan; t[] is copied when the command is queued */
		jtag_add_dr_out(target->tap,
				3,
				bits,
				t,
				jtag_set_end_state(TAP_IDLE));
		buffer += size;
	}

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
613
614 static int xscale_send_u32(struct target *target, uint32_t value)
615 {
616 struct xscale_common *xscale = target_to_xscale(target);
617
618 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
619 return xscale_write_rx(target);
620 }
621
/* Write the cached DCSR value to the target via the SELDCSR JTAG chain.
 *
 * hold_rst / ext_dbg_brk update the corresponding sticky flags in the
 * xscale state when not -1; -1 leaves the current setting untouched.
 * The flags travel in the 3-bit control field of the same scan.
 *
 * Returns ERROR_OK or a JTAG error from jtag_execute_queue().
 */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	/* -1 means "keep current setting" */
	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant);

	/* pack the control flags into the 3-bit field */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	/* 32-bit DCSR payload from the register cache */
	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* cache and hardware now agree */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
681
/* Parity of a 32-bit word: returns 0 for an even number of set bits,
 * 1 for an odd number.
 *
 * Classic fold-and-lookup trick: XOR-folding halves the word down to
 * its low nibble while preserving parity, then 0x6996 serves as a
 * 16-entry parity lookup table indexed by that nibble.
 * (Removed the commented-out debug leftovers from the original.)
 */
static unsigned int parity (unsigned int v)
{
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v &= 0xf;
	return (0x6996 >> v) & 1;
}
693
/* Load one 8-word cache line into the mini instruction cache via the
 * LDIC JTAG function.
 *
 * va      - virtual address of the cache line (low 5 bits ignored)
 * buffer  - the 8 instruction words to load
 *
 * The first DR scan sends the 6-bit LDIC command plus the 27-bit line
 * address; the following 8 scans each send one instruction word with
 * its parity bit.
 *
 * Returns the result of jtag_execute_queue().
 */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
			XSCALE_LDIC << xscale->xscale_variant);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].tap = target->tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* parity is byte-order independent, so reading the packed
		 * bytes back through memcpy is fine on any host */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(2, fields, jtag_get_end_state());
	}

	return jtag_execute_queue();
}
750
751 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
752 {
753 struct xscale_common *xscale = target_to_xscale(target);
754 uint8_t packet[4];
755 uint8_t cmd;
756 struct scan_field fields[2];
757
758 jtag_set_end_state(TAP_IDLE);
759 xscale_jtag_set_instr(target->tap,
760 XSCALE_LDIC << xscale->xscale_variant);
761
762 /* CMD for invalidate IC line b000, bits [6:4] b000 */
763 buf_set_u32(&cmd, 0, 6, 0x0);
764
765 /* virtual address of desired cache line */
766 buf_set_u32(packet, 0, 27, va >> 5);
767
768 memset(&fields, 0, sizeof fields);
769
770 fields[0].tap = target->tap;
771 fields[0].num_bits = 6;
772 fields[0].out_value = &cmd;
773
774 fields[1].tap = target->tap;
775 fields[1].num_bits = 27;
776 fields[1].out_value = packet;
777
778 jtag_add_dr_scan(2, fields, jtag_get_end_state());
779
780 return ERROR_OK;
781 }
782
783 static int xscale_update_vectors(struct target *target)
784 {
785 struct xscale_common *xscale = target_to_xscale(target);
786 int i;
787 int retval;
788
789 uint32_t low_reset_branch, high_reset_branch;
790
791 for (i = 1; i < 8; i++)
792 {
793 /* if there's a static vector specified for this exception, override */
794 if (xscale->static_high_vectors_set & (1 << i))
795 {
796 xscale->high_vectors[i] = xscale->static_high_vectors[i];
797 }
798 else
799 {
800 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
801 if (retval == ERROR_TARGET_TIMEOUT)
802 return retval;
803 if (retval != ERROR_OK)
804 {
805 /* Some of these reads will fail as part of normal execution */
806 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
807 }
808 }
809 }
810
811 for (i = 1; i < 8; i++)
812 {
813 if (xscale->static_low_vectors_set & (1 << i))
814 {
815 xscale->low_vectors[i] = xscale->static_low_vectors[i];
816 }
817 else
818 {
819 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
820 if (retval == ERROR_TARGET_TIMEOUT)
821 return retval;
822 if (retval != ERROR_OK)
823 {
824 /* Some of these reads will fail as part of normal execution */
825 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
826 }
827 }
828 }
829
830 /* calculate branches to debug handler */
831 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
832 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
833
834 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
835 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
836
837 /* invalidate and load exception vectors in mini i-cache */
838 xscale_invalidate_ic_line(target, 0x0);
839 xscale_invalidate_ic_line(target, 0xffff0000);
840
841 xscale_load_ic(target, 0x0, xscale->low_vectors);
842 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
843
844 return ERROR_OK;
845 }
846
847 static int xscale_arch_state(struct target *target)
848 {
849 struct xscale_common *xscale = target_to_xscale(target);
850 struct arm *armv4_5 = &xscale->armv4_5_common;
851
852 static const char *state[] =
853 {
854 "disabled", "enabled"
855 };
856
857 static const char *arch_dbg_reason[] =
858 {
859 "", "\n(processor reset)", "\n(trace buffer full)"
860 };
861
862 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
863 {
864 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
865 return ERROR_INVALID_ARGUMENTS;
866 }
867
868 arm_arch_state(target);
869 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
870 state[xscale->armv4_5_mmu.mmu_enabled],
871 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
872 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
873 arch_dbg_reason[xscale->arch_debug_reason]);
874
875 return ERROR_OK;
876 }
877
/* Periodic poll hook: detect entry into debug state by peeking the TX
 * register (non-consuming read).  Data in TX means the debug handler is
 * running, i.e. the core halted; process the debug entry and fire the
 * appropriate halted event. */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
	{
		enum target_state previous_state = target->state;
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
		{

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		}
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		{
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be perfomed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
917
/* Handle entry into debug state: drain the register dump the debug
 * handler pushes through TX (r0, pc, r1-r7, cpsr, then the banked
 * registers), decode the Method-of-Entry from DCSR, apply the PC fixup,
 * refresh cached MMU/cache state, and service trace collection.
 *
 * Returns ERROR_OK, or an error from DCSR/TX communication.
 */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t pc;
	uint32_t buffer[10];
	int i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
	armv4_5->core_cache->reg_list[15].dirty = 1;
	armv4_5->core_cache->reg_list[15].valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache
	 * (handler sends r1..r7 after r0 and pc) */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(armv4_5, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* an invalid mode value means the wire protocol got out of sync */
	if (!is_arm_mode(armv4_5->core_mode))
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
			arm_mode_name(armv4_5->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	/* NOTE(review): these xscale_receive() calls ignore their return
	 * value, unlike the one above -- confirm whether that is safe */
	if (armv4_5->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
		armv4_5->spsr->dirty = false;
		armv4_5->spsr->valid = true;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++)
	{
		struct reg *r = arm_reg_current(armv4_5, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* examine debug reason: Method-of-Entry field, DCSR bits [4:2] */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	switch (moe)
	{
		case 0x0: /* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1: /* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2: /* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3: /* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4: /* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5: /* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6: /* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7: /* Reserved (may flag Hot-Debug support) */
		default:
			/* NOTE(review): exit() aborts the whole OpenOCD process on
			 * unexpected target data; returning ERROR_TARGET_FAILURE
			 * would be friendlier -- confirm before changing */
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.buffer_enabled)
	{
		xscale_read_trace(target);
		xscale->trace.buffer_fill--;

		/* resume if we're still collecting trace data */
		if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
			&& (xscale->trace.buffer_fill > 0))
		{
			xscale_resume(target, 1, 0x0, 1, 0);
		}
		else
		{
			xscale->trace.buffer_enabled = 0;
		}
	}

	return ERROR_OK;
}
1087
1088 static int xscale_halt(struct target *target)
1089 {
1090 struct xscale_common *xscale = target_to_xscale(target);
1091
1092 LOG_DEBUG("target->state: %s",
1093 target_state_name(target));
1094
1095 if (target->state == TARGET_HALTED)
1096 {
1097 LOG_DEBUG("target was already halted");
1098 return ERROR_OK;
1099 }
1100 else if (target->state == TARGET_UNKNOWN)
1101 {
1102 /* this must not happen for a xscale target */
1103 LOG_ERROR("target was in unknown state when halt was requested");
1104 return ERROR_TARGET_INVALID;
1105 }
1106 else if (target->state == TARGET_RESET)
1107 {
1108 LOG_DEBUG("target->state == TARGET_RESET");
1109 }
1110 else
1111 {
1112 /* assert external dbg break */
1113 xscale->external_debug_break = 1;
1114 xscale_read_dcsr(target);
1115
1116 target->debug_reason = DBG_REASON_DBGRQ;
1117 }
1118
1119 return ERROR_OK;
1120 }
1121
1122 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1123 {
1124 struct xscale_common *xscale = target_to_xscale(target);
1125 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1126 int retval;
1127
1128 if (xscale->ibcr0_used)
1129 {
1130 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1131
1132 if (ibcr0_bp)
1133 {
1134 xscale_unset_breakpoint(target, ibcr0_bp);
1135 }
1136 else
1137 {
1138 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1139 exit(-1);
1140 }
1141 }
1142
1143 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1144 return retval;
1145
1146 return ERROR_OK;
1147 }
1148
1149 static int xscale_disable_single_step(struct target *target)
1150 {
1151 struct xscale_common *xscale = target_to_xscale(target);
1152 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1153 int retval;
1154
1155 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1156 return retval;
1157
1158 return ERROR_OK;
1159 }
1160
1161 static void xscale_enable_watchpoints(struct target *target)
1162 {
1163 struct watchpoint *watchpoint = target->watchpoints;
1164
1165 while (watchpoint)
1166 {
1167 if (watchpoint->set == 0)
1168 xscale_set_watchpoint(target, watchpoint);
1169 watchpoint = watchpoint->next;
1170 }
1171 }
1172
1173 static void xscale_enable_breakpoints(struct target *target)
1174 {
1175 struct breakpoint *breakpoint = target->breakpoints;
1176
1177 /* set any pending breakpoints */
1178 while (breakpoint)
1179 {
1180 if (breakpoint->set == 0)
1181 xscale_set_breakpoint(target, breakpoint);
1182 breakpoint = breakpoint->next;
1183 }
1184 }
1185
1186 static int xscale_resume(struct target *target, int current,
1187 uint32_t address, int handle_breakpoints, int debug_execution)
1188 {
1189 struct xscale_common *xscale = target_to_xscale(target);
1190 struct arm *armv4_5 = &xscale->armv4_5_common;
1191 struct breakpoint *breakpoint = target->breakpoints;
1192 uint32_t current_pc;
1193 int retval;
1194 int i;
1195
1196 LOG_DEBUG("-");
1197
1198 if (target->state != TARGET_HALTED)
1199 {
1200 LOG_WARNING("target not halted");
1201 return ERROR_TARGET_NOT_HALTED;
1202 }
1203
1204 if (!debug_execution)
1205 {
1206 target_free_all_working_areas(target);
1207 }
1208
1209 /* update vector tables */
1210 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1211 return retval;
1212
1213 /* current = 1: continue on current pc, otherwise continue at <address> */
1214 if (!current)
1215 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1216
1217 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1218
1219 /* if we're at the reset vector, we have to simulate the branch */
1220 if (current_pc == 0x0)
1221 {
1222 arm_simulate_step(target, NULL);
1223 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1224 }
1225
1226 /* the front-end may request us not to handle breakpoints */
1227 if (handle_breakpoints)
1228 {
1229 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1230 {
1231 uint32_t next_pc;
1232
1233 /* there's a breakpoint at the current PC, we have to step over it */
1234 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1235 xscale_unset_breakpoint(target, breakpoint);
1236
1237 /* calculate PC of next instruction */
1238 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1239 {
1240 uint32_t current_opcode;
1241 target_read_u32(target, current_pc, &current_opcode);
1242 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1243 }
1244
1245 LOG_DEBUG("enable single-step");
1246 xscale_enable_single_step(target, next_pc);
1247
1248 /* restore banked registers */
1249 retval = xscale_restore_banked(target);
1250
1251 /* send resume request (command 0x30 or 0x31)
1252 * clean the trace buffer if it is to be enabled (0x62) */
1253 if (xscale->trace.buffer_enabled)
1254 {
1255 xscale_send_u32(target, 0x62);
1256 xscale_send_u32(target, 0x31);
1257 }
1258 else
1259 xscale_send_u32(target, 0x30);
1260
1261 /* send CPSR */
1262 xscale_send_u32(target,
1263 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1264 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1265 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1266
1267 for (i = 7; i >= 0; i--)
1268 {
1269 /* send register */
1270 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1271 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1272 }
1273
1274 /* send PC */
1275 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1276 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1277
1278 /* wait for and process debug entry */
1279 xscale_debug_entry(target);
1280
1281 LOG_DEBUG("disable single-step");
1282 xscale_disable_single_step(target);
1283
1284 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1285 xscale_set_breakpoint(target, breakpoint);
1286 }
1287 }
1288
1289 /* enable any pending breakpoints and watchpoints */
1290 xscale_enable_breakpoints(target);
1291 xscale_enable_watchpoints(target);
1292
1293 /* restore banked registers */
1294 retval = xscale_restore_banked(target);
1295
1296 /* send resume request (command 0x30 or 0x31)
1297 * clean the trace buffer if it is to be enabled (0x62) */
1298 if (xscale->trace.buffer_enabled)
1299 {
1300 xscale_send_u32(target, 0x62);
1301 xscale_send_u32(target, 0x31);
1302 }
1303 else
1304 xscale_send_u32(target, 0x30);
1305
1306 /* send CPSR */
1307 xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
1308 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1309 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1310
1311 for (i = 7; i >= 0; i--)
1312 {
1313 /* send register */
1314 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1315 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1316 }
1317
1318 /* send PC */
1319 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1320 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1321
1322 target->debug_reason = DBG_REASON_NOTHALTED;
1323
1324 if (!debug_execution)
1325 {
1326 /* registers are now invalid */
1327 register_cache_invalidate(armv4_5->core_cache);
1328 target->state = TARGET_RUNNING;
1329 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1330 }
1331 else
1332 {
1333 target->state = TARGET_DEBUG_RUNNING;
1334 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1335 }
1336
1337 LOG_DEBUG("target resumed");
1338
1339 return ERROR_OK;
1340 }
1341
/* Execute exactly one instruction on an already-halted target.
 *
 * Computes the address of the following instruction via the instruction
 * simulator, arms the IBCR0 comparator on it, restores dirty banked
 * registers, issues a resume request to the debug handler (with optional
 * trace-buffer enable), then waits for re-entry into debug state and
 * disarms the comparator.
 *
 * NOTE(review): 'current', 'address' and 'handle_breakpoints' are unused
 * here; the caller (xscale_step) has already applied them before
 * delegating.
 */
static int xscale_step_inner(struct target *target, int current,
		uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
	{
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
		return retval;

	/* restore banked registers */
	if ((retval = xscale_restore_banked(target)) != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
			return retval;
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
			return retval;
	}
	else
		if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
			return retval;

	/* send CPSR */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* the debug handler expects r7 down to r0, in that order */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
		return retval;
	LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	/* wait for and process debug entry */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1423
1424 static int xscale_step(struct target *target, int current,
1425 uint32_t address, int handle_breakpoints)
1426 {
1427 struct arm *armv4_5 = target_to_arm(target);
1428 struct breakpoint *breakpoint = target->breakpoints;
1429
1430 uint32_t current_pc;
1431 int retval;
1432
1433 if (target->state != TARGET_HALTED)
1434 {
1435 LOG_WARNING("target not halted");
1436 return ERROR_TARGET_NOT_HALTED;
1437 }
1438
1439 /* current = 1: continue on current pc, otherwise continue at <address> */
1440 if (!current)
1441 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1442
1443 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1444
1445 /* if we're at the reset vector, we have to simulate the step */
1446 if (current_pc == 0x0)
1447 {
1448 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1449 return retval;
1450 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1451
1452 target->debug_reason = DBG_REASON_SINGLESTEP;
1453 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1454
1455 return ERROR_OK;
1456 }
1457
1458 /* the front-end may request us not to handle breakpoints */
1459 if (handle_breakpoints)
1460 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1461 {
1462 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1463 return retval;
1464 }
1465
1466 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1467
1468 if (breakpoint)
1469 {
1470 xscale_set_breakpoint(target, breakpoint);
1471 }
1472
1473 LOG_DEBUG("target stepped");
1474
1475 return ERROR_OK;
1476
1477 }
1478
/* Assert SRST while configuring the DCSR so the core traps into debug
 * state when reset is released (Hold Reset + Halt Mode + Trap Reset).
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		  target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1520
/* Deassert SRST and bring the core back under debug control: free all
 * comparator resources, reload the debug handler into the mini-icache,
 * reinstall the low/high exception vectors, and either stay halted in
 * the handler (reset_halt) or resume transparently.
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* reset cleared all hardware breakpoint/watchpoint comparators */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	register_cache_invalidate(xscale->armv4_5_common.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt)
		{
			uint32_t cache_line[8];
			unsigned i;

			/* the handler is downloaded one 32-byte cache line at a time */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad a partial final line with 0xe1a08008 ("mov r8, r8" no-ops) */
			for (; i < 32; i += 4)
			{
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		};

		/* install the vector tables that redirect exceptions into the handler */
		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1654
/* Stub: single core-register read is not supported by this driver.
 * NOTE(review): returns ERROR_OK despite doing nothing — callers see
 * success; verify whether an error code would be more appropriate.
 */
static int xscale_read_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1662
/* Stub: single core-register write is not supported by this driver.
 * NOTE(review): returns ERROR_OK despite doing nothing — callers see
 * success; verify whether an error code would be more appropriate.
 */
static int xscale_write_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode, uint32_t value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1670
1671 static int xscale_full_context(struct target *target)
1672 {
1673 struct arm *armv4_5 = target_to_arm(target);
1674
1675 uint32_t *buffer;
1676
1677 int i, j;
1678
1679 LOG_DEBUG("-");
1680
1681 if (target->state != TARGET_HALTED)
1682 {
1683 LOG_WARNING("target not halted");
1684 return ERROR_TARGET_NOT_HALTED;
1685 }
1686
1687 buffer = malloc(4 * 8);
1688
1689 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1690 * we can't enter User mode on an XScale (unpredictable),
1691 * but User shares registers with SYS
1692 */
1693 for (i = 1; i < 7; i++)
1694 {
1695 enum arm_mode mode = armv4_5_number_to_mode(i);
1696 bool valid = true;
1697 struct reg *r;
1698
1699 if (mode == ARM_MODE_USR)
1700 continue;
1701
1702 /* check if there are invalid registers in the current mode
1703 */
1704 for (j = 0; valid && j <= 16; j++)
1705 {
1706 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1707 mode, j).valid)
1708 valid = false;
1709 }
1710 if (valid)
1711 continue;
1712
1713 /* request banked registers */
1714 xscale_send_u32(target, 0x0);
1715
1716 /* send CPSR for desired bank mode */
1717 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1718
1719 /* get banked registers: r8 to r14; and SPSR
1720 * except in USR/SYS mode
1721 */
1722 if (mode != ARM_MODE_SYS) {
1723 /* SPSR */
1724 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1725 mode, 16);
1726
1727 xscale_receive(target, buffer, 8);
1728
1729 buf_set_u32(r->value, 0, 32, buffer[7]);
1730 r->dirty = false;
1731 r->valid = true;
1732 } else {
1733 xscale_receive(target, buffer, 7);
1734 }
1735
1736 /* move data from buffer to register cache */
1737 for (j = 8; j <= 14; j++)
1738 {
1739 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1740 mode, j);
1741
1742 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1743 r->dirty = false;
1744 r->valid = true;
1745 }
1746 }
1747
1748 free(buffer);
1749
1750 return ERROR_OK;
1751 }
1752
/* Flush dirty banked registers (r8-r14 and, for non-SYS modes, the SPSR)
 * from the register cache back to the target, one mode at a time.  A
 * mode with no dirty registers is skipped entirely; otherwise all of its
 * banked registers are sent, since the debug handler protocol transfers
 * them as a group.
 */
static int xscale_restore_banked(struct target *target)
{
	struct arm *armv4_5 = target_to_arm(target);

	int i, j;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * and check if any banked registers need to be written. Ignore
	 * USR mode (number 0) in favor of SYS; we can't enter User mode on
	 * an XScale (unpredictable), but they share all registers.
	 */
	for (i = 1; i < 7; i++)
	{
		enum arm_mode mode = armv4_5_number_to_mode(i);
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are dirty registers in this mode */
		for (j = 8; j <= 14; j++)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j).dirty)
				goto dirty;
		}

		/* if not USR/SYS, check if the SPSR needs to be written */
		if (mode != ARM_MODE_SYS)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16).dirty)
				goto dirty;
		}

		/* there's nothing to flush for this mode */
		continue;

dirty:
		/* command 0x1: "send banked registers" */
		xscale_send_u32(target, 0x1);

		/* send CPSR for desired mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* send r8 to r14/lr ... only FIQ needs more than r13..r14,
		 * but this protocol doesn't understand that nuance.
		 */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}

		/* send spsr if not in USR/SYS mode */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}
	}

	return ERROR_OK;
}
1825
1826 static int xscale_read_memory(struct target *target, uint32_t address,
1827 uint32_t size, uint32_t count, uint8_t *buffer)
1828 {
1829 struct xscale_common *xscale = target_to_xscale(target);
1830 uint32_t *buf32;
1831 uint32_t i;
1832 int retval;
1833
1834 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1835
1836 if (target->state != TARGET_HALTED)
1837 {
1838 LOG_WARNING("target not halted");
1839 return ERROR_TARGET_NOT_HALTED;
1840 }
1841
1842 /* sanitize arguments */
1843 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1844 return ERROR_INVALID_ARGUMENTS;
1845
1846 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1847 return ERROR_TARGET_UNALIGNED_ACCESS;
1848
1849 /* send memory read request (command 0x1n, n: access size) */
1850 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1851 return retval;
1852
1853 /* send base address for read request */
1854 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1855 return retval;
1856
1857 /* send number of requested data words */
1858 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1859 return retval;
1860
1861 /* receive data from target (count times 32-bit words in host endianness) */
1862 buf32 = malloc(4 * count);
1863 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1864 return retval;
1865
1866 /* extract data from host-endian buffer into byte stream */
1867 for (i = 0; i < count; i++)
1868 {
1869 switch (size)
1870 {
1871 case 4:
1872 target_buffer_set_u32(target, buffer, buf32[i]);
1873 buffer += 4;
1874 break;
1875 case 2:
1876 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1877 buffer += 2;
1878 break;
1879 case 1:
1880 *buffer++ = buf32[i] & 0xff;
1881 break;
1882 default:
1883 LOG_ERROR("invalid read size");
1884 return ERROR_INVALID_ARGUMENTS;
1885 }
1886 }
1887
1888 free(buf32);
1889
1890 /* examine DCSR, to see if Sticky Abort (SA) got set */
1891 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1892 return retval;
1893 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1894 {
1895 /* clear SA bit */
1896 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1897 return retval;
1898
1899 return ERROR_TARGET_DATA_ABORT;
1900 }
1901
1902 return ERROR_OK;
1903 }
1904
1905 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1906 uint32_t size, uint32_t count, uint8_t *buffer)
1907 {
1908 struct xscale_common *xscale = target_to_xscale(target);
1909
1910 /* with MMU inactive, there are only physical addresses */
1911 if (!xscale->armv4_5_mmu.mmu_enabled)
1912 return xscale_read_memory(target, address, size, count, buffer);
1913
1914 /** \todo: provide a non-stub implementation of this routine. */
1915 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1916 target_name(target), __func__);
1917 return ERROR_FAIL;
1918 }
1919
1920 static int xscale_write_memory(struct target *target, uint32_t address,
1921 uint32_t size, uint32_t count, uint8_t *buffer)
1922 {
1923 struct xscale_common *xscale = target_to_xscale(target);
1924 int retval;
1925
1926 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1927
1928 if (target->state != TARGET_HALTED)
1929 {
1930 LOG_WARNING("target not halted");
1931 return ERROR_TARGET_NOT_HALTED;
1932 }
1933
1934 /* sanitize arguments */
1935 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1936 return ERROR_INVALID_ARGUMENTS;
1937
1938 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1939 return ERROR_TARGET_UNALIGNED_ACCESS;
1940
1941 /* send memory write request (command 0x2n, n: access size) */
1942 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1943 return retval;
1944
1945 /* send base address for read request */
1946 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1947 return retval;
1948
1949 /* send number of requested data words to be written*/
1950 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1951 return retval;
1952
1953 /* extract data from host-endian buffer into byte stream */
1954 #if 0
1955 for (i = 0; i < count; i++)
1956 {
1957 switch (size)
1958 {
1959 case 4:
1960 value = target_buffer_get_u32(target, buffer);
1961 xscale_send_u32(target, value);
1962 buffer += 4;
1963 break;
1964 case 2:
1965 value = target_buffer_get_u16(target, buffer);
1966 xscale_send_u32(target, value);
1967 buffer += 2;
1968 break;
1969 case 1:
1970 value = *buffer;
1971 xscale_send_u32(target, value);
1972 buffer += 1;
1973 break;
1974 default:
1975 LOG_ERROR("should never get here");
1976 exit(-1);
1977 }
1978 }
1979 #endif
1980 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1981 return retval;
1982
1983 /* examine DCSR, to see if Sticky Abort (SA) got set */
1984 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1985 return retval;
1986 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1987 {
1988 /* clear SA bit */
1989 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1990 return retval;
1991
1992 return ERROR_TARGET_DATA_ABORT;
1993 }
1994
1995 return ERROR_OK;
1996 }
1997
1998 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1999 uint32_t size, uint32_t count, uint8_t *buffer)
2000 {
2001 struct xscale_common *xscale = target_to_xscale(target);
2002
2003 /* with MMU inactive, there are only physical addresses */
2004 if (!xscale->armv4_5_mmu.mmu_enabled)
2005 return xscale_read_memory(target, address, size, count, buffer);
2006
2007 /** \todo: provide a non-stub implementation of this routine. */
2008 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
2009 target_name(target), __func__);
2010 return ERROR_FAIL;
2011 }
2012
/* Bulk write: implemented as a plain word-wise (size 4) memory write;
 * the XScale debug handler protocol has no faster bulk path.
 */
static int xscale_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}
2018
2019 static uint32_t xscale_get_ttb(struct target *target)
2020 {
2021 struct xscale_common *xscale = target_to_xscale(target);
2022 uint32_t ttb;
2023
2024 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2025 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2026
2027 return ttb;
2028 }
2029
2030 static void xscale_disable_mmu_caches(struct target *target, int mmu,
2031 int d_u_cache, int i_cache)
2032 {
2033 struct xscale_common *xscale = target_to_xscale(target);
2034 uint32_t cp15_control;
2035
2036 /* read cp15 control register */
2037 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2038 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2039
2040 if (mmu)
2041 cp15_control &= ~0x1U;
2042
2043 if (d_u_cache)
2044 {
2045 /* clean DCache */
2046 xscale_send_u32(target, 0x50);
2047 xscale_send_u32(target, xscale->cache_clean_address);
2048
2049 /* invalidate DCache */
2050 xscale_send_u32(target, 0x51);
2051
2052 cp15_control &= ~0x4U;
2053 }
2054
2055 if (i_cache)
2056 {
2057 /* invalidate ICache */
2058 xscale_send_u32(target, 0x52);
2059 cp15_control &= ~0x1000U;
2060 }
2061
2062 /* write new cp15 control register */
2063 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2064
2065 /* execute cpwait to ensure outstanding operations complete */
2066 xscale_send_u32(target, 0x53);
2067 }
2068
2069 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2070 int d_u_cache, int i_cache)
2071 {
2072 struct xscale_common *xscale = target_to_xscale(target);
2073 uint32_t cp15_control;
2074
2075 /* read cp15 control register */
2076 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2077 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2078
2079 if (mmu)
2080 cp15_control |= 0x1U;
2081
2082 if (d_u_cache)
2083 cp15_control |= 0x4U;
2084
2085 if (i_cache)
2086 cp15_control |= 0x1000U;
2087
2088 /* write new cp15 control register */
2089 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2090
2091 /* execute cpwait to ensure outstanding operations complete */
2092 xscale_send_u32(target, 0x53);
2093 }
2094
/* Install a breakpoint on the target.  Hardware breakpoints claim one
 * of the two IBCR comparators (breakpoint->set records which: 1 = IBCR0,
 * 2 = IBCR1); software breakpoints save the original instruction and
 * patch in the ARM or Thumb BKPT opcode.
 * NOTE(review): the "no hardware comparator available" path logs a BUG
 * but still returns ERROR_OK — verify whether callers should see an
 * error instead (xscale_add_breakpoint is supposed to prevent this
 * via ibcr_available accounting).
 */
static int xscale_set_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set)
	{
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		/* bit 0 of the IBCR enables the comparator */
		uint32_t value = breakpoint->address | 1;
		if (!xscale->ibcr0_used)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
			xscale->ibcr0_used = 1;
			breakpoint->set = 1;	/* breakpoint set on first breakpoint register */
		}
		else if (!xscale->ibcr1_used)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
			xscale->ibcr1_used = 1;
			breakpoint->set = 2;	/* breakpoint set on second breakpoint register */
		}
		else
		{
			LOG_ERROR("BUG: no hardware comparator available");
			return ERROR_OK;
		}
	}
	else if (breakpoint->type == BKPT_SOFT)
	{
		if (breakpoint->length == 4)
		{
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
			/* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
			if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
			{
				return retval;
			}
		}
		else
		{
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
			/* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
			if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
			{
				return retval;
			}
		}
		breakpoint->set = 1;
	}

	return ERROR_OK;
}
2167
2168 static int xscale_add_breakpoint(struct target *target,
2169 struct breakpoint *breakpoint)
2170 {
2171 struct xscale_common *xscale = target_to_xscale(target);
2172
2173 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2174 {
2175 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2176 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2177 }
2178
2179 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2180 {
2181 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2182 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2183 }
2184
2185 if (breakpoint->type == BKPT_HARD)
2186 {
2187 xscale->ibcr_available--;
2188 }
2189
2190 return ERROR_OK;
2191 }
2192
2193 static int xscale_unset_breakpoint(struct target *target,
2194 struct breakpoint *breakpoint)
2195 {
2196 int retval;
2197 struct xscale_common *xscale = target_to_xscale(target);
2198
2199 if (target->state != TARGET_HALTED)
2200 {
2201 LOG_WARNING("target not halted");
2202 return ERROR_TARGET_NOT_HALTED;
2203 }
2204
2205 if (!breakpoint->set)
2206 {
2207 LOG_WARNING("breakpoint not set");
2208 return ERROR_OK;
2209 }
2210
2211 if (breakpoint->type == BKPT_HARD)
2212 {
2213 if (breakpoint->set == 1)
2214 {
2215 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2216 xscale->ibcr0_used = 0;
2217 }
2218 else if (breakpoint->set == 2)
2219 {
2220 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2221 xscale->ibcr1_used = 0;
2222 }
2223 breakpoint->set = 0;
2224 }
2225 else
2226 {
2227 /* restore original instruction (kept in target endianness) */
2228 if (breakpoint->length == 4)
2229 {
2230 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2231 {
2232 return retval;
2233 }
2234 }
2235 else
2236 {
2237 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2238 {
2239 return retval;
2240 }
2241 }
2242 breakpoint->set = 0;
2243 }
2244
2245 return ERROR_OK;
2246 }
2247
2248 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2249 {
2250 struct xscale_common *xscale = target_to_xscale(target);
2251
2252 if (target->state != TARGET_HALTED)
2253 {
2254 LOG_WARNING("target not halted");
2255 return ERROR_TARGET_NOT_HALTED;
2256 }
2257
2258 if (breakpoint->set)
2259 {
2260 xscale_unset_breakpoint(target, breakpoint);
2261 }
2262
2263 if (breakpoint->type == BKPT_HARD)
2264 xscale->ibcr_available++;
2265
2266 return ERROR_OK;
2267 }
2268
2269 static int xscale_set_watchpoint(struct target *target,
2270 struct watchpoint *watchpoint)
2271 {
2272 struct xscale_common *xscale = target_to_xscale(target);
2273 uint8_t enable = 0;
2274 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2275 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2276
2277 if (target->state != TARGET_HALTED)
2278 {
2279 LOG_WARNING("target not halted");
2280 return ERROR_TARGET_NOT_HALTED;
2281 }
2282
2283 xscale_get_reg(dbcon);
2284
2285 switch (watchpoint->rw)
2286 {
2287 case WPT_READ:
2288 enable = 0x3;
2289 break;
2290 case WPT_ACCESS:
2291 enable = 0x2;
2292 break;
2293 case WPT_WRITE:
2294 enable = 0x1;
2295 break;
2296 default:
2297 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2298 }
2299
2300 if (!xscale->dbr0_used)
2301 {
2302 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2303 dbcon_value |= enable;
2304 xscale_set_reg_u32(dbcon, dbcon_value);
2305 watchpoint->set = 1;
2306 xscale->dbr0_used = 1;
2307 }
2308 else if (!xscale->dbr1_used)
2309 {
2310 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2311 dbcon_value |= enable << 2;
2312 xscale_set_reg_u32(dbcon, dbcon_value);
2313 watchpoint->set = 2;
2314 xscale->dbr1_used = 1;
2315 }
2316 else
2317 {
2318 LOG_ERROR("BUG: no hardware comparator available");
2319 return ERROR_OK;
2320 }
2321
2322 return ERROR_OK;
2323 }
2324
2325 static int xscale_add_watchpoint(struct target *target,
2326 struct watchpoint *watchpoint)
2327 {
2328 struct xscale_common *xscale = target_to_xscale(target);
2329
2330 if (xscale->dbr_available < 1)
2331 {
2332 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2333 }
2334
2335 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2336 {
2337 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2338 }
2339
2340 xscale->dbr_available--;
2341
2342 return ERROR_OK;
2343 }
2344
2345 static int xscale_unset_watchpoint(struct target *target,
2346 struct watchpoint *watchpoint)
2347 {
2348 struct xscale_common *xscale = target_to_xscale(target);
2349 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2350 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2351
2352 if (target->state != TARGET_HALTED)
2353 {
2354 LOG_WARNING("target not halted");
2355 return ERROR_TARGET_NOT_HALTED;
2356 }
2357
2358 if (!watchpoint->set)
2359 {
2360 LOG_WARNING("breakpoint not set");
2361 return ERROR_OK;
2362 }
2363
2364 if (watchpoint->set == 1)
2365 {
2366 dbcon_value &= ~0x3;
2367 xscale_set_reg_u32(dbcon, dbcon_value);
2368 xscale->dbr0_used = 0;
2369 }
2370 else if (watchpoint->set == 2)
2371 {
2372 dbcon_value &= ~0xc;
2373 xscale_set_reg_u32(dbcon, dbcon_value);
2374 xscale->dbr1_used = 0;
2375 }
2376 watchpoint->set = 0;
2377
2378 return ERROR_OK;
2379 }
2380
2381 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2382 {
2383 struct xscale_common *xscale = target_to_xscale(target);
2384
2385 if (target->state != TARGET_HALTED)
2386 {
2387 LOG_WARNING("target not halted");
2388 return ERROR_TARGET_NOT_HALTED;
2389 }
2390
2391 if (watchpoint->set)
2392 {
2393 xscale_unset_watchpoint(target, watchpoint);
2394 }
2395
2396 xscale->dbr_available++;
2397
2398 return ERROR_OK;
2399 }
2400
2401 static int xscale_get_reg(struct reg *reg)
2402 {
2403 struct xscale_reg *arch_info = reg->arch_info;
2404 struct target *target = arch_info->target;
2405 struct xscale_common *xscale = target_to_xscale(target);
2406
2407 /* DCSR, TX and RX are accessible via JTAG */
2408 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2409 {
2410 return xscale_read_dcsr(arch_info->target);
2411 }
2412 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2413 {
2414 /* 1 = consume register content */
2415 return xscale_read_tx(arch_info->target, 1);
2416 }
2417 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2418 {
2419 /* can't read from RX register (host -> debug handler) */
2420 return ERROR_OK;
2421 }
2422 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2423 {
2424 /* can't (explicitly) read from TXRXCTRL register */
2425 return ERROR_OK;
2426 }
2427 else /* Other DBG registers have to be transfered by the debug handler */
2428 {
2429 /* send CP read request (command 0x40) */
2430 xscale_send_u32(target, 0x40);
2431
2432 /* send CP register number */
2433 xscale_send_u32(target, arch_info->dbg_handler_number);
2434
2435 /* read register value */
2436 xscale_read_tx(target, 1);
2437 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2438
2439 reg->dirty = 0;
2440 reg->valid = 1;
2441 }
2442
2443 return ERROR_OK;
2444 }
2445
2446 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2447 {
2448 struct xscale_reg *arch_info = reg->arch_info;
2449 struct target *target = arch_info->target;
2450 struct xscale_common *xscale = target_to_xscale(target);
2451 uint32_t value = buf_get_u32(buf, 0, 32);
2452
2453 /* DCSR, TX and RX are accessible via JTAG */
2454 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2455 {
2456 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2457 return xscale_write_dcsr(arch_info->target, -1, -1);
2458 }
2459 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2460 {
2461 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2462 return xscale_write_rx(arch_info->target);
2463 }
2464 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2465 {
2466 /* can't write to TX register (debug-handler -> host) */
2467 return ERROR_OK;
2468 }
2469 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2470 {
2471 /* can't (explicitly) write to TXRXCTRL register */
2472 return ERROR_OK;
2473 }
2474 else /* Other DBG registers have to be transfered by the debug handler */
2475 {
2476 /* send CP write request (command 0x41) */
2477 xscale_send_u32(target, 0x41);
2478
2479 /* send CP register number */
2480 xscale_send_u32(target, arch_info->dbg_handler_number);
2481
2482 /* send CP register value */
2483 xscale_send_u32(target, value);
2484 buf_set_u32(reg->value, 0, 32, value);
2485 }
2486
2487 return ERROR_OK;
2488 }
2489
2490 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2491 {
2492 struct xscale_common *xscale = target_to_xscale(target);
2493 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2494 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2495
2496 /* send CP write request (command 0x41) */
2497 xscale_send_u32(target, 0x41);
2498
2499 /* send CP register number */
2500 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2501
2502 /* send CP register value */
2503 xscale_send_u32(target, value);
2504 buf_set_u32(dcsr->value, 0, 32, value);
2505
2506 return ERROR_OK;
2507 }
2508
2509 static int xscale_read_trace(struct target *target)
2510 {
2511 struct xscale_common *xscale = target_to_xscale(target);
2512 struct arm *armv4_5 = &xscale->armv4_5_common;
2513 struct xscale_trace_data **trace_data_p;
2514
2515 /* 258 words from debug handler
2516 * 256 trace buffer entries
2517 * 2 checkpoint addresses
2518 */
2519 uint32_t trace_buffer[258];
2520 int is_address[256];
2521 int i, j;
2522
2523 if (target->state != TARGET_HALTED)
2524 {
2525 LOG_WARNING("target must be stopped to read trace data");
2526 return ERROR_TARGET_NOT_HALTED;
2527 }
2528
2529 /* send read trace buffer command (command 0x61) */
2530 xscale_send_u32(target, 0x61);
2531
2532 /* receive trace buffer content */
2533 xscale_receive(target, trace_buffer, 258);
2534
2535 /* parse buffer backwards to identify address entries */
2536 for (i = 255; i >= 0; i--)
2537 {
2538 is_address[i] = 0;
2539 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2540 ((trace_buffer[i] & 0xf0) == 0xd0))
2541 {
2542 if (i >= 3)
2543 is_address[--i] = 1;
2544 if (i >= 2)
2545 is_address[--i] = 1;
2546 if (i >= 1)
2547 is_address[--i] = 1;
2548 if (i >= 0)
2549 is_address[--i] = 1;
2550 }
2551 }
2552
2553
2554 /* search first non-zero entry */
2555 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2556 ;
2557
2558 if (j == 256)
2559 {
2560 LOG_DEBUG("no trace data collected");
2561 return ERROR_XSCALE_NO_TRACE_DATA;
2562 }
2563
2564 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2565 ;
2566
2567 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2568 (*trace_data_p)->next = NULL;
2569 (*trace_data_p)->chkpt0 = trace_buffer[256];
2570 (*trace_data_p)->chkpt1 = trace_buffer[257];
2571 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2572 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2573 (*trace_data_p)->depth = 256 - j;
2574
2575 for (i = j; i < 256; i++)
2576 {
2577 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2578 if (is_address[i])
2579 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2580 else
2581 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2582 }
2583
2584 return ERROR_OK;
2585 }
2586
2587 static int xscale_read_instruction(struct target *target,
2588 struct arm_instruction *instruction)
2589 {
2590 struct xscale_common *xscale = target_to_xscale(target);
2591 int i;
2592 int section = -1;
2593 size_t size_read;
2594 uint32_t opcode;
2595 int retval;
2596
2597 if (!xscale->trace.image)
2598 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2599
2600 /* search for the section the current instruction belongs to */
2601 for (i = 0; i < xscale->trace.image->num_sections; i++)
2602 {
2603 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2604 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2605 {
2606 section = i;
2607 break;
2608 }
2609 }
2610
2611 if (section == -1)
2612 {
2613 /* current instruction couldn't be found in the image */
2614 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2615 }
2616
2617 if (xscale->trace.core_state == ARM_STATE_ARM)
2618 {
2619 uint8_t buf[4];
2620 if ((retval = image_read_section(xscale->trace.image, section,
2621 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2622 4, buf, &size_read)) != ERROR_OK)
2623 {
2624 LOG_ERROR("error while reading instruction: %i", retval);
2625 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2626 }
2627 opcode = target_buffer_get_u32(target, buf);
2628 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2629 }
2630 else if (xscale->trace.core_state == ARM_STATE_THUMB)
2631 {
2632 uint8_t buf[2];
2633 if ((retval = image_read_section(xscale->trace.image, section,
2634 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2635 2, buf, &size_read)) != ERROR_OK)
2636 {
2637 LOG_ERROR("error while reading instruction: %i", retval);
2638 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2639 }
2640 opcode = target_buffer_get_u16(target, buf);
2641 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2642 }
2643 else
2644 {
2645 LOG_ERROR("BUG: unknown core state encountered");
2646 exit(-1);
2647 }
2648
2649 return ERROR_OK;
2650 }
2651
2652 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2653 int i, uint32_t *target)
2654 {
2655 /* if there are less than four entries prior to the indirect branch message
2656 * we can't extract the address */
2657 if (i < 4)
2658 {
2659 return -1;
2660 }
2661
2662 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2663 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2664
2665 return 0;
2666 }
2667
2668 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2669 {
2670 struct xscale_common *xscale = target_to_xscale(target);
2671 int next_pc_ok = 0;
2672 uint32_t next_pc = 0x0;
2673 struct xscale_trace_data *trace_data = xscale->trace.data;
2674 int retval;
2675
2676 while (trace_data)
2677 {
2678 int i, chkpt;
2679 int rollover;
2680 int branch;
2681 int exception;
2682 xscale->trace.core_state = ARM_STATE_ARM;
2683
2684 chkpt = 0;
2685 rollover = 0;
2686
2687 for (i = 0; i < trace_data->depth; i++)
2688 {
2689 next_pc_ok = 0;
2690 branch = 0;
2691 exception = 0;
2692
2693 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2694 continue;
2695
2696 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2697 {
2698 case 0: /* Exceptions */
2699 case 1:
2700 case 2:
2701 case 3:
2702 case 4:
2703 case 5:
2704 case 6:
2705 case 7:
2706 exception = (trace_data->entries[i].data & 0x70) >> 4;
2707 next_pc_ok = 1;
2708 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2709 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2710 break;
2711 case 8: /* Direct Branch */
2712 branch = 1;
2713 break;
2714 case 9: /* Indirect Branch */
2715 branch = 1;
2716 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2717 {
2718 next_pc_ok = 1;
2719 }
2720 break;
2721 case 13: /* Checkpointed Indirect Branch */
2722 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2723 {
2724 next_pc_ok = 1;
2725 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2726 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2727 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2728 }
2729 /* explicit fall-through */
2730 case 12: /* Checkpointed Direct Branch */
2731 branch = 1;
2732 if (chkpt == 0)
2733 {
2734 next_pc_ok = 1;
2735 next_pc = trace_data->chkpt0;
2736 chkpt++;
2737 }
2738 else if (chkpt == 1)
2739 {
2740 next_pc_ok = 1;
2741 next_pc = trace_data->chkpt0;
2742 chkpt++;
2743 }
2744 else
2745 {
2746 LOG_WARNING("more than two checkpointed branches encountered");
2747 }
2748 break;
2749 case 15: /* Roll-over */
2750 rollover++;
2751 continue;
2752 default: /* Reserved */
2753 command_print(cmd_ctx, "--- reserved trace message ---");
2754 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2755 return ERROR_OK;
2756 }
2757
2758 if (xscale->trace.pc_ok)
2759 {
2760 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2761 struct arm_instruction instruction;
2762
2763 if ((exception == 6) || (exception == 7))
2764 {
2765 /* IRQ or FIQ exception, no instruction executed */
2766 executed -= 1;
2767 }
2768
2769 while (executed-- >= 0)
2770 {
2771 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2772 {
2773 /* can't continue tracing with no image available */
2774 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2775 {
2776 return retval;
2777 }
2778 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2779 {
2780 /* TODO: handle incomplete images */
2781 }
2782 }
2783
2784 /* a precise abort on a load to the PC is included in the incremental
2785 * word count, other instructions causing data aborts are not included
2786 */
2787 if ((executed == 0) && (exception == 4)
2788 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2789 {
2790 if ((instruction.type == ARM_LDM)
2791 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2792 {
2793 executed--;
2794 }
2795 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2796 && (instruction.info.load_store.Rd != 15))
2797 {
2798 executed--;
2799 }
2800 }
2801
2802 /* only the last instruction executed
2803 * (the one that caused the control flow change)
2804 * could be a taken branch
2805 */
2806 if (((executed == -1) && (branch == 1)) &&
2807 (((instruction.type == ARM_B) ||
2808 (instruction.type == ARM_BL) ||
2809 (instruction.type == ARM_BLX)) &&
2810 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2811 {
2812 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2813 }
2814 else
2815 {
2816 xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2;
2817 }
2818 command_print(cmd_ctx, "%s", instruction.text);
2819 }
2820
2821 rollover = 0;
2822 }
2823
2824 if (next_pc_ok)
2825 {
2826 xscale->trace.current_pc = next_pc;
2827 xscale->trace.pc_ok = 1;
2828 }
2829 }
2830
2831 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2)
2832 {
2833 struct arm_instruction instruction;
2834 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2835 {
2836 /* can't continue tracing with no image available */
2837 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2838 {
2839 return retval;
2840 }
2841 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2842 {
2843 /* TODO: handle incomplete images */
2844 }
2845 }
2846 command_print(cmd_ctx, "%s", instruction.text);
2847 }
2848
2849 trace_data = trace_data->next;
2850 }
2851
2852 return ERROR_OK;
2853 }
2854
/* register-cache accessors for XScale debug registers */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2859
2860 static void xscale_build_reg_cache(struct target *target)
2861 {
2862 struct xscale_common *xscale = target_to_xscale(target);
2863 struct arm *armv4_5 = &xscale->armv4_5_common;
2864 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2865 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2866 int i;
2867 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2868
2869 (*cache_p) = arm_build_reg_cache(target, armv4_5);
2870
2871 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2872 cache_p = &(*cache_p)->next;
2873
2874 /* fill in values for the xscale reg cache */
2875 (*cache_p)->name = "XScale registers";
2876 (*cache_p)->next = NULL;
2877 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2878 (*cache_p)->num_regs = num_regs;
2879
2880 for (i = 0; i < num_regs; i++)
2881 {
2882 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2883 (*cache_p)->reg_list[i].value = calloc(4, 1);
2884 (*cache_p)->reg_list[i].dirty = 0;
2885 (*cache_p)->reg_list[i].valid = 0;
2886 (*cache_p)->reg_list[i].size = 32;
2887 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2888 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2889 arch_info[i] = xscale_reg_arch_info[i];
2890 arch_info[i].target = target;
2891 }
2892
2893 xscale->reg_cache = (*cache_p);
2894 }
2895
/* target_type init_target hook: set up the register caches.
 * cmd_ctx is unused here.
 */
static int xscale_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
2902
/* Initialize the xscale_common structure for a new target instance.
 *
 * Resolves the JTAG IR length from the (optional) variant string, sets
 * default addresses for the debug handler and cache-clean region, clears
 * breakpoint/watchpoint bookkeeping, and wires up the ARMv4/5 core and
 * MMU callback tables.  Called once from xscale_target_create().
 */
static int xscale_init_arch_info(struct target *target,
	struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	struct arm *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specfic data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* a variant param ... */
	if (variant) {
		int ir_length = 0;

		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else if (strcmp(variant, "pxa3xx") == 0)
			ir_length = 11;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		/* the variant wins over a mismatching configured IR length */
		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* PXA3xx shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler
	 * (B-instruction offsets from the low/high reset vector, pipeline-
	 * adjusted by 8 and divided by 4 per the ARM branch encoding) */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* vectors 1..7 default to a branch-to-self */
	for (i = 1; i <= 7; i++)
	{
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two instruction-breakpoint comparators (IBCR0/IBCR1) ... */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	/* ... and two data-breakpoint comparators (DBR0/DBR1) */
	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
			target_name(target));

	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	xscale->trace.capture_status = TRACE_IDLE;
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.buffer_enabled = 0;
	xscale->trace.buffer_fill = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	arm_init_arch_info(target, armv4_5);

	/* MMU/cache callbacks used by the generic armv4_5_mmu layer */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
3018
3019 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3020 {
3021 struct xscale_common *xscale;
3022
3023 if (sizeof xscale_debug_handler - 1 > 0x800) {
3024 LOG_ERROR("debug_handler.bin: larger than 2kb");
3025 return ERROR_FAIL;
3026 }
3027
3028 xscale = calloc(1, sizeof(*xscale));
3029 if (!xscale)
3030 return ERROR_FAIL;
3031
3032 return xscale_init_arch_info(target, xscale, target->tap,
3033 target->variant);
3034 }
3035
3036 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3037 {
3038 struct target *target = NULL;
3039 struct xscale_common *xscale;
3040 int retval;
3041 uint32_t handler_address;
3042
3043 if (CMD_ARGC < 2)
3044 {
3045 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3046 return ERROR_OK;
3047 }
3048
3049 if ((target = get_target(CMD_ARGV[0])) == NULL)
3050 {
3051 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3052 return ERROR_FAIL;
3053 }
3054
3055 xscale = target_to_xscale(target);
3056 retval = xscale_verify_pointer(CMD_CTX, xscale);
3057 if (retval != ERROR_OK)
3058 return retval;
3059
3060 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3061
3062 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3063 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3064 {
3065 xscale->handler_address = handler_address;
3066 }
3067 else
3068 {
3069 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3070 return ERROR_FAIL;
3071 }
3072
3073 return ERROR_OK;
3074 }
3075
3076 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3077 {
3078 struct target *target = NULL;
3079 struct xscale_common *xscale;
3080 int retval;
3081 uint32_t cache_clean_address;
3082
3083 if (CMD_ARGC < 2)
3084 {
3085 return ERROR_COMMAND_SYNTAX_ERROR;
3086 }
3087
3088 target = get_target(CMD_ARGV[0]);
3089 if (target == NULL)
3090 {
3091 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3092 return ERROR_FAIL;
3093 }
3094 xscale = target_to_xscale(target);
3095 retval = xscale_verify_pointer(CMD_CTX, xscale);
3096 if (retval != ERROR_OK)
3097 return retval;
3098
3099 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3100
3101 if (cache_clean_address & 0xffff)
3102 {
3103 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3104 }
3105 else
3106 {
3107 xscale->cache_clean_address = cache_clean_address;
3108 }
3109
3110 return ERROR_OK;
3111 }
3112
3113 COMMAND_HANDLER(xscale_handle_cache_info_command)
3114 {
3115 struct target *target = get_current_target(CMD_CTX);
3116 struct xscale_common *xscale = target_to_xscale(target);
3117 int retval;
3118
3119 retval = xscale_verify_pointer(CMD_CTX, xscale);
3120 if (retval != ERROR_OK)
3121 return retval;
3122
3123 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3124 }
3125
3126 static int xscale_virt2phys(struct target *target,
3127 uint32_t virtual, uint32_t *physical)
3128 {
3129 struct xscale_common *xscale = target_to_xscale(target);
3130 int type;
3131 uint32_t cb;
3132 int domain;
3133 uint32_t ap;
3134
3135 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3136 LOG_ERROR(xscale_not);
3137 return ERROR_TARGET_INVALID;
3138 }
3139
3140 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3141 if (type == -1)
3142 {
3143 return ret;
3144 }
3145 *physical = ret;
3146 return ERROR_OK;
3147 }
3148
3149 static int xscale_mmu(struct target *target, int *enabled)
3150 {
3151 struct xscale_common *xscale = target_to_xscale(target);
3152
3153 if (target->state != TARGET_HALTED)
3154 {
3155 LOG_ERROR("Target not halted");
3156 return ERROR_TARGET_INVALID;
3157 }
3158 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3159 return ERROR_OK;
3160 }
3161
3162 COMMAND_HANDLER(xscale_handle_mmu_command)
3163 {
3164 struct target *target = get_current_target(CMD_CTX);
3165 struct xscale_common *xscale = target_to_xscale(target);
3166 int retval;
3167
3168 retval = xscale_verify_pointer(CMD_CTX, xscale);
3169 if (retval != ERROR_OK)
3170 return retval;
3171
3172 if (target->state != TARGET_HALTED)
3173 {
3174 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3175 return ERROR_OK;
3176 }
3177
3178 if (CMD_ARGC >= 1)
3179 {
3180 bool enable;
3181 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3182 if (enable)
3183 xscale_enable_mmu_caches(target, 1, 0, 0);
3184 else
3185 xscale_disable_mmu_caches(target, 1, 0, 0);
3186 xscale->armv4_5_mmu.mmu_enabled = enable;
3187 }
3188
3189 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3190
3191 return ERROR_OK;
3192 }
3193
3194 COMMAND_HANDLER(xscale_handle_idcache_command)
3195 {
3196 struct target *target = get_current_target(CMD_CTX);
3197 struct xscale_common *xscale = target_to_xscale(target);
3198
3199 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3200 if (retval != ERROR_OK)
3201 return retval;
3202
3203 if (target->state != TARGET_HALTED)
3204 {
3205 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3206 return ERROR_OK;
3207 }
3208
3209 bool icache;
3210 COMMAND_PARSE_BOOL(CMD_NAME, icache, "icache", "dcache");
3211
3212 if (CMD_ARGC >= 1)
3213 {
3214 bool enable;
3215 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3216 if (enable)
3217 xscale_enable_mmu_caches(target, 1, 0, 0);
3218 else
3219 xscale_disable_mmu_caches(target, 1, 0, 0);
3220 if (icache)
3221 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3222 else
3223 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3224 }
3225
3226 bool enabled = icache ?
3227 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3228 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3229 const char *msg = enabled ? "enabled" : "disabled";
3230 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3231
3232 return ERROR_OK;
3233 }
3234
3235 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3236 {
3237 struct target *target = get_current_target(CMD_CTX);
3238 struct xscale_common *xscale = target_to_xscale(target);
3239 int retval;
3240
3241 retval = xscale_verify_pointer(CMD_CTX, xscale);
3242 if (retval != ERROR_OK)
3243 return retval;
3244
3245 if (CMD_ARGC < 1)
3246 {
3247 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3248 }
3249 else
3250 {
3251 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3252 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3253 xscale_write_dcsr(target, -1, -1);
3254 }
3255
3256 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3257
3258 return ERROR_OK;
3259 }
3260
3261
3262 COMMAND_HANDLER(xscale_handle_vector_table_command)
3263 {
3264 struct target *target = get_current_target(CMD_CTX);
3265 struct xscale_common *xscale = target_to_xscale(target);
3266 int err = 0;
3267 int retval;
3268
3269 retval = xscale_verify_pointer(CMD_CTX, xscale);
3270 if (retval != ERROR_OK)
3271 return retval;
3272
3273 if (CMD_ARGC == 0) /* print current settings */
3274 {
3275 int idx;
3276
3277 command_print(CMD_CTX, "active user-set static vectors:");
3278 for (idx = 1; idx < 8; idx++)
3279 if (xscale->static_low_vectors_set & (1 << idx))
3280 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3281 for (idx = 1; idx < 8; idx++)
3282 if (xscale->static_high_vectors_set & (1 << idx))
3283 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3284 return ERROR_OK;
3285 }
3286
3287 if (CMD_ARGC != 3)
3288 err = 1;
3289 else
3290 {
3291 int idx;
3292 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3293 uint32_t vec;
3294 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3295
3296 if (idx < 1 || idx >= 8)
3297 err = 1;
3298
3299 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3300 {
3301 xscale->static_low_vectors_set |= (1<<idx);
3302 xscale->static_low_vectors[idx] = vec;
3303 }
3304 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3305 {
3306 xscale->static_high_vectors_set |= (1<<idx);
3307 xscale->static_high_vectors[idx] = vec;
3308 }
3309 else
3310 err = 1;
3311 }
3312
3313 if (err)
3314 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3315
3316 return ERROR_OK;
3317 }
3318
3319
/* "xscale trace_buffer <enable|disable> [fill [n] | wrap]" command.
 *
 * Enables or disables the on-chip trace buffer and selects its mode:
 * "fill" stops tracing once the buffer is full (optionally n times),
 * "wrap" lets it overwrite the oldest entries.  Enabling discards any
 * previously collected trace data.  Requires a halted target.
 */
COMMAND_HANDLER(xscale_handle_trace_buffer_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t dcsr_value;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
	{
		struct xscale_trace_data *td, *next_td;
		xscale->trace.buffer_enabled = 1;

		/* free old trace data */
		td = xscale->trace.data;
		while (td)
		{
			next_td = td->next;

			if (td->entries)
				free(td->entries);
			free(td);
			td = next_td;
		}
		xscale->trace.data = NULL;
	}
	else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
	{
		xscale->trace.buffer_enabled = 0;
	}

	/* Optional second argument selects the buffer mode; buffer_fill > 0
	 * means "fill n times", -1 means wrap-around mode. */
	if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
	{
		uint32_t fill = 1;	/* default: fill once */
		if (CMD_ARGC >= 3)
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
		xscale->trace.buffer_fill = fill;
	}
	else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
	{
		xscale->trace.buffer_fill = -1;
	}

	if (xscale->trace.buffer_enabled)
	{
		/* if we enable the trace buffer in fill-once
		 * mode we know the address of the first instruction */
		xscale->trace.pc_ok = 1;
		xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
	}
	else
	{
		/* otherwise the address is unknown, and we have no known good PC */
		xscale->trace.pc_ok = 0;
	}

	command_print(CMD_CTX, "trace buffer %s (%s)",
		(xscale->trace.buffer_enabled) ? "enabled" : "disabled",
		(xscale->trace.buffer_fill > 0) ? "fill" : "wrap");

	/* Update the trace-mode bits in the debug control/status register.
	 * NOTE(review): the low two DCSR bits appear to select the trace
	 * buffer mode (2 = fill, 0 = wrap) — confirm against the XScale
	 * Core Developer's Manual DCSR description. */
	dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
	if (xscale->trace.buffer_fill >= 0)
		xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
	else
		xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);

	return ERROR_OK;
}
3398
3399 COMMAND_HANDLER(xscale_handle_trace_image_command)
3400 {
3401 struct target *target = get_current_target(CMD_CTX);
3402 struct xscale_common *xscale = target_to_xscale(target);
3403 int retval;
3404
3405 if (CMD_ARGC < 1)
3406 {
3407 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3408 return ERROR_OK;
3409 }
3410
3411 retval = xscale_verify_pointer(CMD_CTX, xscale);
3412 if (retval != ERROR_OK)
3413 return retval;
3414
3415 if (xscale->trace.image)
3416 {
3417 image_close(xscale->trace.image);
3418 free(xscale->trace.image);
3419 command_print(CMD_CTX, "previously loaded image found and closed");
3420 }
3421
3422 xscale->trace.image = malloc(sizeof(struct image));
3423 xscale->trace.image->base_address_set = 0;
3424 xscale->trace.image->start_address_set = 0;
3425
3426 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3427 if (CMD_ARGC >= 2)
3428 {
3429 xscale->trace.image->base_address_set = 1;
3430 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3431 }
3432 else
3433 {
3434 xscale->trace.image->base_address_set = 0;
3435 }
3436
3437 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3438 {
3439 free(xscale->trace.image);
3440 xscale->trace.image = NULL;
3441 return ERROR_OK;
3442 }
3443
3444 return ERROR_OK;
3445 }
3446
3447 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3448 {
3449 struct target *target = get_current_target(CMD_CTX);
3450 struct xscale_common *xscale = target_to_xscale(target);
3451 struct xscale_trace_data *trace_data;
3452 struct fileio file;
3453 int retval;
3454
3455 retval = xscale_verify_pointer(CMD_CTX, xscale);
3456 if (retval != ERROR_OK)
3457 return retval;
3458
3459 if (target->state != TARGET_HALTED)
3460 {
3461 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3462 return ERROR_OK;
3463 }
3464
3465 if (CMD_ARGC < 1)
3466 {
3467 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3468 return ERROR_OK;
3469 }
3470
3471 trace_data = xscale->trace.data;
3472
3473 if (!trace_data)
3474 {
3475 command_print(CMD_CTX, "no trace data collected");
3476 return ERROR_OK;
3477 }
3478
3479 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3480 {
3481 return ERROR_OK;
3482 }
3483
3484 while (trace_data)
3485 {
3486 int i;
3487
3488 fileio_write_u32(&file, trace_data->chkpt0);
3489 fileio_write_u32(&file, trace_data->chkpt1);
3490 fileio_write_u32(&file, trace_data->last_instruction);
3491 fileio_write_u32(&file, trace_data->depth);
3492
3493 for (i = 0; i < trace_data->depth; i++)
3494 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3495
3496 trace_data = trace_data->next;
3497 }
3498
3499 fileio_close(&file);
3500
3501 return ERROR_OK;
3502 }
3503
3504 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3505 {
3506 struct target *target = get_current_target(CMD_CTX);
3507 struct xscale_common *xscale = target_to_xscale(target);
3508 int retval;
3509
3510 retval = xscale_verify_pointer(CMD_CTX, xscale);
3511 if (retval != ERROR_OK)
3512 return retval;
3513
3514 xscale_analyze_trace(target, CMD_CTX);
3515
3516 return ERROR_OK;
3517 }
3518
/* "xscale cp15 <register> [value]" — read or write a coprocessor 15
 * register via the debug handler.  The numeric CP15 register number given
 * by the user is mapped to the corresponding entry of the OpenOCD register
 * cache; with one argument the register is read and printed, with two it
 * is written on the target.  Requires a halted target.
 */
COMMAND_HANDLER(xscale_handle_cp15)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}
	uint32_t reg_no = 0;
	struct reg *reg = NULL;
	if (CMD_ARGC > 0)
	{
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
		/*translate from xscale cp15 register no to openocd register*/
		switch (reg_no)
		{
		case 0:
			reg_no = XSCALE_MAINID;
			break;
		case 1:
			reg_no = XSCALE_CTRL;
			break;
		case 2:
			reg_no = XSCALE_TTB;
			break;
		case 3:
			reg_no = XSCALE_DAC;
			break;
		case 5:
			reg_no = XSCALE_FSR;
			break;
		case 6:
			reg_no = XSCALE_FAR;
			break;
		case 13:
			reg_no = XSCALE_PID;
			break;
		case 15:
			reg_no = XSCALE_CPACCESS;
			break;
		default:
			/* CP15 registers 4 and 7..12 have no cache entry here. */
			command_print(CMD_CTX, "invalid register number");
			return ERROR_INVALID_ARGUMENTS;
		}
		reg = &xscale->reg_cache->reg_list[reg_no];

	}
	if (CMD_ARGC == 1)
	{
		uint32_t value;

		/* read cp15 control register */
		xscale_get_reg(reg);
		value = buf_get_u32(reg->value, 0, 32);
		command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
	}
	else if (CMD_ARGC == 2)
	{
		uint32_t value;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);

		/* The four xscale_send_u32() calls below form one fixed-order
		 * protocol exchange with the on-target debug handler; do not
		 * reorder them. */

		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, reg_no);

		/* send CP register value */
		xscale_send_u32(target, value);

		/* execute cpwait to ensure outstanding operations complete */
		xscale_send_u32(target, 0x53);

		/* NOTE(review): the cached reg->value is not refreshed or
		 * invalidated after this write — a following read may use a
		 * stale cached value; confirm intended behavior. */
	}
	else
	{
		command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
	}

	return ERROR_OK;
}
3606
3607 static const struct command_registration xscale_exec_command_handlers[] = {
3608 {
3609 .name = "cache_info",
3610 .handler = &xscale_handle_cache_info_command,
3611 .mode = COMMAND_EXEC, NULL,
3612 },
3613
3614 {
3615 .name = "mmu",
3616 .handler = &xscale_handle_mmu_command,
3617 .mode = COMMAND_EXEC,
3618 .usage = "[enable|disable]",
3619 .help = "enable or disable the MMU",
3620 },
3621 {
3622 .name = "icache",
3623 .handler = &xscale_handle_idcache_command,
3624 .mode = COMMAND_EXEC,
3625 .usage = "[enable|disable]",
3626 .help = "enable or disable the ICache",
3627 },
3628 {
3629 .name = "dcache",
3630 .handler = &xscale_handle_idcache_command,
3631 .mode = COMMAND_EXEC,
3632 .usage = "[enable|disable]",
3633 .help = "enable or disable the DCache",
3634 },
3635
3636 {
3637 .name = "vector_catch",
3638 .handler = &xscale_handle_vector_catch_command,
3639 .mode = COMMAND_EXEC,
3640 .help = "mask of vectors that should be caught",
3641 .usage = "[<mask>]",
3642 },
3643 {
3644 .name = "vector_table",
3645 .handler = &xscale_handle_vector_table_command,
3646 .mode = COMMAND_EXEC,
3647 .usage = "<high|low> <index> <code>",
3648 .help = "set static code for exception handler entry",
3649 },
3650
3651 {
3652 .name = "trace_buffer",
3653 .handler = &xscale_handle_trace_buffer_command,
3654 .mode = COMMAND_EXEC,
3655 .usage = "<enable | disable> [fill [n]|wrap]",
3656 },
3657 {
3658 .name = "dump_trace",
3659 .handler = &xscale_handle_dump_trace_command,
3660 .mode = COMMAND_EXEC,
3661 .help = "dump content of trace buffer to <file>",
3662 .usage = "<file>",
3663 },
3664 {
3665 .name = "analyze_trace",
3666 .handler = &xscale_handle_analyze_trace_buffer_command,
3667 .mode = COMMAND_EXEC,
3668 .help = "analyze content of trace buffer",
3669 },
3670 {
3671 .name = "trace_image",
3672 .handler = &xscale_handle_trace_image_command,
3673 COMMAND_EXEC,
3674 .help = "load image from <file> [base address]",
3675 .usage = "<file> [address] [type]",
3676 },
3677
3678 {
3679 .name = "cp15",
3680 .handler = &xscale_handle_cp15,
3681 .mode = COMMAND_EXEC,
3682 .help = "access coproc 15",
3683 .usage = "<register> [value]",
3684 },
3685 COMMAND_REGISTRATION_DONE
3686 };
/* Commands usable in any mode (config or exec); also chains in the
 * EXEC-only command table above. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = &xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.usage = "<target#> <address>",
	},
	{
		.name = "cache_clean_address",
		.handler = &xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
	},
	{
		/* pull in the EXEC-mode subcommands */
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level registration: generic ARM commands plus the "xscale"
 * command group containing all XScale-specific subcommands. */
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3716
/* Target-type descriptor binding the XScale implementation into the
 * OpenOCD target framework.  Unset hooks (NULL) fall back to the
 * framework's defaults or are simply unsupported (e.g. soft_reset_halt). */
struct target_type xscale_target =
{
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	.soft_reset_halt = NULL,

	/* generic ARM register-list support */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* virtual accesses go through the MMU; *_phys variants bypass it */
	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,
	.bulk_write_memory = xscale_bulk_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will allow linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)