ARM: keep a handle to the PC
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
41
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
74
75
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
78 *
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
83 */
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
86
/* Display names for the XScale-specific registers exposed through the
 * OpenOCD register cache.  Order must match xscale_reg_arch_info[]
 * below (index comments mark every tenth entry). */
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID", /* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0", /* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX", /* 20 */
	"XSCALE_TXRXCTRL",
};
112
/* Per-register backing info, parallel to xscale_reg_list[].  Entries
 * with a register number are reached through the debug handler; the
 * trailing -1 entries are accessed directly over JTAG chains (see the
 * comments on each). */
static const struct xscale_reg xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
	{-1, NULL}, /* TX accessed via JTAG */
	{-1, NULL}, /* RX accessed via JTAG */
	{-1, NULL}, /* TXRXCTRL implicit access via JTAG */
};
138
139 /* convenience wrapper to access XScale specific registers */
140 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
141 {
142 uint8_t buf[4];
143
144 buf_set_u32(buf, 0, 32, value);
145
146 return xscale_set_reg(reg, buf);
147 }
148
/* Message printed by command handlers when the selected target is not
 * an XScale. */
static const char xscale_not[] = "target is not an XScale";

/* Guard used by command handlers: check the magic number to confirm
 * @xscale really is XScale private state before dereferencing further.
 * Prints a message and returns ERROR_TARGET_INVALID on mismatch. */
static int xscale_verify_pointer(struct command_context *cmd_ctx,
		struct xscale_common *xscale)
{
	if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
		command_print(cmd_ctx, xscale_not);
		return ERROR_TARGET_INVALID;
	}
	return ERROR_OK;
}
160
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
162 {
163 if (tap == NULL)
164 return ERROR_FAIL;
165
166 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
167 {
168 struct scan_field field;
169 uint8_t scratch[4];
170
171 memset(&field, 0, sizeof field);
172 field.tap = tap;
173 field.num_bits = tap->ir_length;
174 field.out_value = scratch;
175 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
176
177 jtag_add_ir_scan(1, &field, jtag_get_end_state());
178 }
179
180 return ERROR_OK;
181 }
182
/* Read the Debug Control and Status Register through the SELDCSR JTAG
 * chain into the cached XSCALE_DCSR register value.
 *
 * Two DR scans are performed: the first captures DCSR (and carries the
 * sticky hold_rst / external_debug_break flags in the 3-bit control
 * field), the second writes the just-read value back unchanged.
 * Returns ERROR_OK or a JTAG error code. */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_DRPAUSE);
	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant);

	/* hold_rst / external_debug_break ride along in the control field */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	/* the cached copy now matches the hardware */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_set_end_state(TAP_IDLE);

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
249
250
251 static void xscale_getbuf(jtag_callback_data_t arg)
252 {
253 uint8_t *in = (uint8_t *)arg;
254 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
255 }
256
257 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
258 {
259 if (num_words == 0)
260 return ERROR_INVALID_ARGUMENTS;
261
262 struct xscale_common *xscale = target_to_xscale(target);
263 int retval = ERROR_OK;
264 tap_state_t path[3];
265 struct scan_field fields[3];
266 uint8_t *field0 = malloc(num_words * 1);
267 uint8_t field0_check_value = 0x2;
268 uint8_t field0_check_mask = 0x6;
269 uint32_t *field1 = malloc(num_words * 4);
270 uint8_t field2_check_value = 0x0;
271 uint8_t field2_check_mask = 0x1;
272 int words_done = 0;
273 int words_scheduled = 0;
274 int i;
275
276 path[0] = TAP_DRSELECT;
277 path[1] = TAP_DRCAPTURE;
278 path[2] = TAP_DRSHIFT;
279
280 memset(&fields, 0, sizeof fields);
281
282 fields[0].tap = target->tap;
283 fields[0].num_bits = 3;
284 fields[0].check_value = &field0_check_value;
285 fields[0].check_mask = &field0_check_mask;
286
287 fields[1].tap = target->tap;
288 fields[1].num_bits = 32;
289
290 fields[2].tap = target->tap;
291 fields[2].num_bits = 1;
292 fields[2].check_value = &field2_check_value;
293 fields[2].check_mask = &field2_check_mask;
294
295 jtag_set_end_state(TAP_IDLE);
296 xscale_jtag_set_instr(target->tap,
297 XSCALE_DBGTX << xscale->xscale_variant);
298 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
299
300 /* repeat until all words have been collected */
301 int attempts = 0;
302 while (words_done < num_words)
303 {
304 /* schedule reads */
305 words_scheduled = 0;
306 for (i = words_done; i < num_words; i++)
307 {
308 fields[0].in_value = &field0[i];
309
310 jtag_add_pathmove(3, path);
311
312 fields[1].in_value = (uint8_t *)(field1 + i);
313
314 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
315
316 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
317
318 words_scheduled++;
319 }
320
321 if ((retval = jtag_execute_queue()) != ERROR_OK)
322 {
323 LOG_ERROR("JTAG error while receiving data from debug handler");
324 break;
325 }
326
327 /* examine results */
328 for (i = words_done; i < num_words; i++)
329 {
330 if (!(field0[0] & 1))
331 {
332 /* move backwards if necessary */
333 int j;
334 for (j = i; j < num_words - 1; j++)
335 {
336 field0[j] = field0[j + 1];
337 field1[j] = field1[j + 1];
338 }
339 words_scheduled--;
340 }
341 }
342 if (words_scheduled == 0)
343 {
344 if (attempts++==1000)
345 {
346 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
347 retval = ERROR_TARGET_TIMEOUT;
348 break;
349 }
350 }
351
352 words_done += words_scheduled;
353 }
354
355 for (i = 0; i < num_words; i++)
356 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
357
358 free(field1);
359
360 return retval;
361 }
362
/* Read the debug handler's TX register via the DBGTX JTAG chain into
 * the cached XSCALE_TX register value.
 *
 * @consume selects the TAP path: Capture-DR straight into Shift-DR
 * clears the TX-ready handshake (consuming the word), while the detour
 * through Pause-DR peeks without consuming.  Polls for roughly one
 * second when consuming and no data is ready yet.
 *
 * Returns ERROR_OK when valid data was captured,
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when TX held no data, or
 * ERROR_TARGET_TIMEOUT on JTAG error or timeout. */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGTX << xscale->xscale_variant);

	/* consuming path: straight to Shift-DR */
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	/* peeking path: detour through Exit1/Pause before Shift-DR */
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* give up after about one second of polling */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* only keep polling when consuming and no data was ready yet */
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
	done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
461
/* Write the cached XSCALE_RX register value to the debug handler's RX
 * register through the DBGRX JTAG chain.
 *
 * Polls (up to ~1 second) until the handler has consumed the previous
 * word (status bit 0 clear), then performs a final scan with the
 * rx_valid bit set to hand the new word over.
 * Returns ERROR_OK, a JTAG error, or ERROR_TARGET_TIMEOUT. */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* give up after about one second of polling */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
	done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
547
548 /* send count elements of size byte to the debug handler */
/* Send @count elements of @size bytes (1, 2 or 4) from @buffer to the
 * debug handler's RX register.
 *
 * Fast path: queues out-only DR scans (no per-word handshake readback
 * as xscale_write_rx() does) and flushes the whole queue once at the
 * end.  Elements are converted from target endianness to host order
 * before being shifted out.  Returns ERROR_OK, a JTAG error, or
 * ERROR_INVALID_ARGUMENTS for an unsupported @size. */
static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t t[3];
	int bits[3];
	int retval;
	int done_count = 0;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap,
			XSCALE_DBGRX << xscale->xscale_variant);

	/* three DR fields: 3-bit control, 32-bit data, trailing 1-bit
	 * (set to 1; presumably the rx_valid bit as in xscale_write_rx —
	 * TODO confirm against the XScale debug chain spec) */
	bits[0]=3;
	t[0]=0;
	bits[1]=32;
	t[2]=1;
	bits[2]=1;
	int endianness = target->endianness;
	while (done_count++ < count)
	{
		switch (size)
		{
			case 4:
				if (endianness == TARGET_LITTLE_ENDIAN)
				{
					t[1]=le_to_h_u32(buffer);
				} else
				{
					t[1]=be_to_h_u32(buffer);
				}
				break;
			case 2:
				if (endianness == TARGET_LITTLE_ENDIAN)
				{
					t[1]=le_to_h_u16(buffer);
				} else
				{
					t[1]=be_to_h_u16(buffer);
				}
				break;
			case 1:
				t[1]=buffer[0];
				break;
			default:
				LOG_ERROR("BUG: size neither 4, 2 nor 1");
				return ERROR_INVALID_ARGUMENTS;
		}
		jtag_add_dr_out(target->tap,
				3,
				bits,
				t,
				jtag_set_end_state(TAP_IDLE));
		buffer += size;
	}

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
613
614 static int xscale_send_u32(struct target *target, uint32_t value)
615 {
616 struct xscale_common *xscale = target_to_xscale(target);
617
618 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
619 return xscale_write_rx(target);
620 }
621
/* Write the cached DCSR value to the target through the SELDCSR JTAG
 * chain.  @hold_rst and @ext_dbg_brk update the corresponding sticky
 * flags unless passed as -1; both flags travel in the 3-bit control
 * field of the same scan.  Returns ERROR_OK or a JTAG error code. */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
			XSCALE_SELDCSR << xscale->xscale_variant);

	/* flags ride along in the 3-bit control field */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* cached copy now matches the hardware */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
681
682 /* parity of the number of bits 0 if even; 1 if odd. for 32 bit words */
/* Parity of a 32-bit word: returns 0 when the number of set bits is
 * even, 1 when odd.
 *
 * The word is folded onto itself (shifts of 16, 8, then 4) so the
 * parity of all 32 bits lands in the low nibble, which then indexes
 * the constant 0x6996 — the 16-entry parity lookup table for 0..15.
 * (Commented-out debug logging removed.) */
static unsigned int parity (unsigned int v)
{
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v &= 0xf;
	return (0x6996 >> v) & 1;
}
693
/* Load one 8-word cache line (@buffer) into the mini-ICache at virtual
 * address @va using the LDIC JTAG instruction.
 *
 * A 6-bit command plus 27-bit line address packet is shifted first,
 * then the two scan fields are reused for eight 33-bit words: 32
 * instruction bits each followed by a parity bit.
 * Returns ERROR_OK or a JTAG error code. */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
			XSCALE_LDIC << xscale->xscale_variant);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].tap = target->tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* copy via memcpy to avoid aliasing the packed byte buffer */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(2, fields, jtag_get_end_state());
	}

	return jtag_execute_queue();
}
750
751 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
752 {
753 struct xscale_common *xscale = target_to_xscale(target);
754 uint8_t packet[4];
755 uint8_t cmd;
756 struct scan_field fields[2];
757
758 jtag_set_end_state(TAP_IDLE);
759 xscale_jtag_set_instr(target->tap,
760 XSCALE_LDIC << xscale->xscale_variant);
761
762 /* CMD for invalidate IC line b000, bits [6:4] b000 */
763 buf_set_u32(&cmd, 0, 6, 0x0);
764
765 /* virtual address of desired cache line */
766 buf_set_u32(packet, 0, 27, va >> 5);
767
768 memset(&fields, 0, sizeof fields);
769
770 fields[0].tap = target->tap;
771 fields[0].num_bits = 6;
772 fields[0].out_value = &cmd;
773
774 fields[1].tap = target->tap;
775 fields[1].num_bits = 27;
776 fields[1].out_value = packet;
777
778 jtag_add_dr_scan(2, fields, jtag_get_end_state());
779
780 return ERROR_OK;
781 }
782
783 static int xscale_update_vectors(struct target *target)
784 {
785 struct xscale_common *xscale = target_to_xscale(target);
786 int i;
787 int retval;
788
789 uint32_t low_reset_branch, high_reset_branch;
790
791 for (i = 1; i < 8; i++)
792 {
793 /* if there's a static vector specified for this exception, override */
794 if (xscale->static_high_vectors_set & (1 << i))
795 {
796 xscale->high_vectors[i] = xscale->static_high_vectors[i];
797 }
798 else
799 {
800 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
801 if (retval == ERROR_TARGET_TIMEOUT)
802 return retval;
803 if (retval != ERROR_OK)
804 {
805 /* Some of these reads will fail as part of normal execution */
806 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
807 }
808 }
809 }
810
811 for (i = 1; i < 8; i++)
812 {
813 if (xscale->static_low_vectors_set & (1 << i))
814 {
815 xscale->low_vectors[i] = xscale->static_low_vectors[i];
816 }
817 else
818 {
819 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
820 if (retval == ERROR_TARGET_TIMEOUT)
821 return retval;
822 if (retval != ERROR_OK)
823 {
824 /* Some of these reads will fail as part of normal execution */
825 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
826 }
827 }
828 }
829
830 /* calculate branches to debug handler */
831 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
832 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
833
834 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
835 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
836
837 /* invalidate and load exception vectors in mini i-cache */
838 xscale_invalidate_ic_line(target, 0x0);
839 xscale_invalidate_ic_line(target, 0xffff0000);
840
841 xscale_load_ic(target, 0x0, xscale->low_vectors);
842 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
843
844 return ERROR_OK;
845 }
846
847 static int xscale_arch_state(struct target *target)
848 {
849 struct xscale_common *xscale = target_to_xscale(target);
850 struct arm *armv4_5 = &xscale->armv4_5_common;
851
852 static const char *state[] =
853 {
854 "disabled", "enabled"
855 };
856
857 static const char *arch_dbg_reason[] =
858 {
859 "", "\n(processor reset)", "\n(trace buffer full)"
860 };
861
862 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
863 {
864 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
865 return ERROR_INVALID_ARGUMENTS;
866 }
867
868 arm_arch_state(target);
869 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
870 state[xscale->armv4_5_mmu.mmu_enabled],
871 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
872 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
873 arch_dbg_reason[xscale->arch_debug_reason]);
874
875 return ERROR_OK;
876 }
877
/* Target poll hook: while the core is (debug-)running, peek at the TX
 * register without consuming it.  Data present means the core entered
 * debug state, so run the full debug-entry sequence and notify event
 * handlers; an unexpected TX error is reported as a (fake) halt so the
 * user can recover with a reset. */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
	{
		enum target_state previous_state = target->state;
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
		{

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		}
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		{
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be performed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
917
/* Complete entry into debug state.
 *
 * Receives the register dump the debug handler streams out (r0, pc,
 * r1-r7, cpsr, then the banked r8-r14 and spsr where applicable) into
 * the register cache, decodes the DCSR method-of-entry bits to set the
 * debug reason, applies the PC fixup for the debug entry vector,
 * refreshes the cached MMU/cache status, and collects trace data when
 * tracing is active (possibly resuming immediately to keep filling the
 * trace buffer). */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t pc;
	uint32_t buffer[10];
	int i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->pc->value, 0, 32, buffer[1]);
	armv4_5->pc->dirty = 1;
	armv4_5->pc->valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(armv4_5, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* an invalid mode in cpsr means the dump itself is garbage */
	if (!is_arm_mode(armv4_5->core_mode))
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
			arm_mode_name(armv4_5->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (armv4_5->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
		armv4_5->spsr->dirty = false;
		armv4_5->spsr->valid = true;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++)
	{
		struct reg *r = arm_reg_current(armv4_5, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* examine debug reason: method-of-entry bits in DCSR */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	switch (moe)
	{
		case 0x0: /* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1: /* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2: /* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3: /* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4: /* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5: /* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6: /* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7: /* Reserved (may flag Hot-Debug support) */
		default:
			/* NOTE(review): exit() terminates all of OpenOCD here;
			 * a soft error return would be friendlier */
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(armv4_5->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.buffer_enabled)
	{
		xscale_read_trace(target);
		xscale->trace.buffer_fill--;

		/* resume if we're still collecting trace data */
		if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
			&& (xscale->trace.buffer_fill > 0))
		{
			xscale_resume(target, 1, 0x0, 1, 0);
		}
		else
		{
			xscale->trace.buffer_enabled = 0;
		}
	}

	return ERROR_OK;
}
1087
1088 static int xscale_halt(struct target *target)
1089 {
1090 struct xscale_common *xscale = target_to_xscale(target);
1091
1092 LOG_DEBUG("target->state: %s",
1093 target_state_name(target));
1094
1095 if (target->state == TARGET_HALTED)
1096 {
1097 LOG_DEBUG("target was already halted");
1098 return ERROR_OK;
1099 }
1100 else if (target->state == TARGET_UNKNOWN)
1101 {
1102 /* this must not happen for a xscale target */
1103 LOG_ERROR("target was in unknown state when halt was requested");
1104 return ERROR_TARGET_INVALID;
1105 }
1106 else if (target->state == TARGET_RESET)
1107 {
1108 LOG_DEBUG("target->state == TARGET_RESET");
1109 }
1110 else
1111 {
1112 /* assert external dbg break */
1113 xscale->external_debug_break = 1;
1114 xscale_read_dcsr(target);
1115
1116 target->debug_reason = DBG_REASON_DBGRQ;
1117 }
1118
1119 return ERROR_OK;
1120 }
1121
1122 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1123 {
1124 struct xscale_common *xscale = target_to_xscale(target);
1125 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1126 int retval;
1127
1128 if (xscale->ibcr0_used)
1129 {
1130 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1131
1132 if (ibcr0_bp)
1133 {
1134 xscale_unset_breakpoint(target, ibcr0_bp);
1135 }
1136 else
1137 {
1138 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1139 exit(-1);
1140 }
1141 }
1142
1143 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1144 return retval;
1145
1146 return ERROR_OK;
1147 }
1148
1149 static int xscale_disable_single_step(struct target *target)
1150 {
1151 struct xscale_common *xscale = target_to_xscale(target);
1152 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1153 int retval;
1154
1155 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1156 return retval;
1157
1158 return ERROR_OK;
1159 }
1160
1161 static void xscale_enable_watchpoints(struct target *target)
1162 {
1163 struct watchpoint *watchpoint = target->watchpoints;
1164
1165 while (watchpoint)
1166 {
1167 if (watchpoint->set == 0)
1168 xscale_set_watchpoint(target, watchpoint);
1169 watchpoint = watchpoint->next;
1170 }
1171 }
1172
1173 static void xscale_enable_breakpoints(struct target *target)
1174 {
1175 struct breakpoint *breakpoint = target->breakpoints;
1176
1177 /* set any pending breakpoints */
1178 while (breakpoint)
1179 {
1180 if (breakpoint->set == 0)
1181 xscale_set_breakpoint(target, breakpoint);
1182 breakpoint = breakpoint->next;
1183 }
1184 }
1185
/* Resume execution on a halted target.
 *
 * current = 1 resumes at the cached PC; otherwise the PC is first set to
 * 'address'.  With handle_breakpoints set, a breakpoint at the resume PC is
 * temporarily removed, single-stepped over (via IBCR0), then re-installed
 * before the real resume.  debug_execution selects TARGET_DEBUG_RUNNING
 * semantics (working areas kept, DEBUG_RESUMED event) instead of a normal
 * run.
 *
 * The resume handshake with the on-chip debug handler is order-critical:
 * command (0x30/0x31, optionally preceded by 0x62 to clean the trace
 * buffer), then CPSR, then r7..r0, then PC.
 */
static int xscale_resume(struct target *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct breakpoint *breakpoint = target->breakpoints;
	uint32_t current_pc;
	int retval;
	int i;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution)
	{
		target_free_all_working_areas(target);
	}

	/* update vector tables */
	if ((retval = xscale_update_vectors(target)) != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->pc->value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0)
	{
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		breakpoint = breakpoint_find(target,
				buf_get_u32(armv4_5->pc->value, 0, 32));
		if (breakpoint != NULL)
		{
			uint32_t next_pc;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
			{
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
			}

			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			retval = xscale_restore_banked(target);

			/* send resume request (command 0x30 or 0x31)
			 * clean the trace buffer if it is to be enabled (0x62) */
			if (xscale->trace.buffer_enabled)
			{
				xscale_send_u32(target, 0x62);
				xscale_send_u32(target, 0x31);
			}
			else
				xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
				buf_get_u32(armv4_5->cpsr->value, 0, 32));

			/* send r7..r0 (handler expects descending order) */
			for (i = 7; i >= 0; i--)
			{
				/* send register */
				xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target,
				buf_get_u32(armv4_5->pc->value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
				buf_get_u32(armv4_5->pc->value, 0, 32));

			/* wait for and process debug entry (the single step re-halts) */
			xscale_debug_entry(target);

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			/* re-install the breakpoint we stepped over */
			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	retval = xscale_restore_banked(target);

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	}
	else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* send r7..r0 */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(armv4_5->pc->value, 0, 32));
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->pc->value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution)
	{
		/* registers are now invalid */
		register_cache_invalidate(armv4_5->core_cache);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}
1346
/* Execute exactly one instruction on a halted target.
 *
 * Computes the next PC by simulation, arms the IBCR0 comparator there,
 * then performs the full resume handshake with the debug handler
 * (command 0x30/0x31, CPSR, r7..r0, PC) and waits for debug re-entry.
 * Assumes the caller (xscale_step) has already removed any breakpoint at
 * the current PC.
 */
static int xscale_step_inner(struct target *target, int current,
		uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
	{
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
		return retval;

	/* restore banked registers */
	if ((retval = xscale_restore_banked(target)) != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
			return retval;
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
			return retval;
	}
	else
		if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
			return retval;

	/* send CPSR */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* send r7..r0 (handler expects descending order) */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->pc->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->pc->value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	/* wait for and process debug entry (IBCR0 hit after one instruction) */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1431
1432 static int xscale_step(struct target *target, int current,
1433 uint32_t address, int handle_breakpoints)
1434 {
1435 struct arm *armv4_5 = target_to_arm(target);
1436 struct breakpoint *breakpoint = NULL;
1437
1438 uint32_t current_pc;
1439 int retval;
1440
1441 if (target->state != TARGET_HALTED)
1442 {
1443 LOG_WARNING("target not halted");
1444 return ERROR_TARGET_NOT_HALTED;
1445 }
1446
1447 /* current = 1: continue on current pc, otherwise continue at <address> */
1448 if (!current)
1449 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1450
1451 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1452
1453 /* if we're at the reset vector, we have to simulate the step */
1454 if (current_pc == 0x0)
1455 {
1456 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1457 return retval;
1458 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1459
1460 target->debug_reason = DBG_REASON_SINGLESTEP;
1461 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1462
1463 return ERROR_OK;
1464 }
1465
1466 /* the front-end may request us not to handle breakpoints */
1467 if (handle_breakpoints)
1468 breakpoint = breakpoint_find(target,
1469 buf_get_u32(armv4_5->pc->value, 0, 32));
1470 if (breakpoint != NULL) {
1471 retval = xscale_unset_breakpoint(target, breakpoint);
1472 if (retval != ERROR_OK)
1473 return retval;
1474 }
1475
1476 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1477
1478 if (breakpoint)
1479 {
1480 xscale_set_breakpoint(target, breakpoint);
1481 }
1482
1483 LOG_DEBUG("target stepped");
1484
1485 return ERROR_OK;
1486
1487 }
1488
/* Assert SRST on the target, first configuring the DCSR so the core traps
 * into the debug handler when reset is later released (Hold Reset + Halt
 * mode + Trap Reset).  Honors target->reset_halt by requesting a halt.
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		  target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1530
/* Release SRST and bring the target back up: reset the breakpoint/watchpoint
 * bookkeeping, download the debug handler into the mini-icache (32-byte
 * cache lines at a time), install the low/high exception vectors, then let
 * the core run into the handler.  Unless reset_halt was requested, the
 * target is resumed immediately after the initial debug entry.
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* reset invalidated all hardware comparator state */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	register_cache_invalidate(xscale->armv4_5_common.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt)
		{
			uint32_t cache_line[8];
			unsigned i;

			/* at most one 32-byte cache line per load */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad a partial final line with 0xe1a08008
			 * (ARM "mov r8, r8", a harmless filler) */
			for (; i < 32; i += 4)
			{
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		};

		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1664
/* Stub: reading a single core register through the debug handler is not
 * implemented; callers rely on the register cache populated at debug entry.
 * NOTE(review): returns ERROR_OK despite doing nothing — callers cannot
 * detect the missing functionality.
 */
static int xscale_read_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1672
/* Stub: writing a single core register through the debug handler is not
 * implemented; dirty cached registers are flushed by xscale_restore_banked()
 * instead.  NOTE(review): returns ERROR_OK despite doing nothing.
 */
static int xscale_write_core_reg(struct target *target, struct reg *r,
		int num, enum arm_mode mode, uint32_t value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1680
1681 static int xscale_full_context(struct target *target)
1682 {
1683 struct arm *armv4_5 = target_to_arm(target);
1684
1685 uint32_t *buffer;
1686
1687 int i, j;
1688
1689 LOG_DEBUG("-");
1690
1691 if (target->state != TARGET_HALTED)
1692 {
1693 LOG_WARNING("target not halted");
1694 return ERROR_TARGET_NOT_HALTED;
1695 }
1696
1697 buffer = malloc(4 * 8);
1698
1699 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1700 * we can't enter User mode on an XScale (unpredictable),
1701 * but User shares registers with SYS
1702 */
1703 for (i = 1; i < 7; i++)
1704 {
1705 enum arm_mode mode = armv4_5_number_to_mode(i);
1706 bool valid = true;
1707 struct reg *r;
1708
1709 if (mode == ARM_MODE_USR)
1710 continue;
1711
1712 /* check if there are invalid registers in the current mode
1713 */
1714 for (j = 0; valid && j <= 16; j++)
1715 {
1716 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1717 mode, j).valid)
1718 valid = false;
1719 }
1720 if (valid)
1721 continue;
1722
1723 /* request banked registers */
1724 xscale_send_u32(target, 0x0);
1725
1726 /* send CPSR for desired bank mode */
1727 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1728
1729 /* get banked registers: r8 to r14; and SPSR
1730 * except in USR/SYS mode
1731 */
1732 if (mode != ARM_MODE_SYS) {
1733 /* SPSR */
1734 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1735 mode, 16);
1736
1737 xscale_receive(target, buffer, 8);
1738
1739 buf_set_u32(r->value, 0, 32, buffer[7]);
1740 r->dirty = false;
1741 r->valid = true;
1742 } else {
1743 xscale_receive(target, buffer, 7);
1744 }
1745
1746 /* move data from buffer to register cache */
1747 for (j = 8; j <= 14; j++)
1748 {
1749 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1750 mode, j);
1751
1752 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1753 r->dirty = false;
1754 r->valid = true;
1755 }
1756 }
1757
1758 free(buffer);
1759
1760 return ERROR_OK;
1761 }
1762
/* Flush dirty banked registers (r8..r14 and, outside SYS, the SPSR) back to
 * the target for every privileged mode, using debug handler command 0x1
 * ("send banked registers").  Modes with nothing dirty are skipped.
 */
static int xscale_restore_banked(struct target *target)
{
	struct arm *armv4_5 = target_to_arm(target);

	int i, j;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * and check if any banked registers need to be written. Ignore
	 * USR mode (number 0) in favor of SYS; we can't enter User mode on
	 * an XScale (unpredictable), but they share all registers.
	 */
	for (i = 1; i < 7; i++)
	{
		enum arm_mode mode = armv4_5_number_to_mode(i);
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are dirty registers in this mode */
		for (j = 8; j <= 14; j++)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j).dirty)
				goto dirty;
		}

		/* if not USR/SYS, check if the SPSR needs to be written */
		if (mode != ARM_MODE_SYS)
		{
			if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16).dirty)
				goto dirty;
		}

		/* there's nothing to flush for this mode */
		continue;

dirty:
		/* command 0x1: "send banked registers" */
		xscale_send_u32(target, 0x1);

		/* send CPSR for desired mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* send r8 to r14/lr ... only FIQ needs more than r13..r14,
		 * but this protocol doesn't understand that nuance.
		 */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, j);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}

		/* send spsr if not in USR/SYS mode */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					mode, 16);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}
	}

	return ERROR_OK;
}
1835
1836 static int xscale_read_memory(struct target *target, uint32_t address,
1837 uint32_t size, uint32_t count, uint8_t *buffer)
1838 {
1839 struct xscale_common *xscale = target_to_xscale(target);
1840 uint32_t *buf32;
1841 uint32_t i;
1842 int retval;
1843
1844 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1845
1846 if (target->state != TARGET_HALTED)
1847 {
1848 LOG_WARNING("target not halted");
1849 return ERROR_TARGET_NOT_HALTED;
1850 }
1851
1852 /* sanitize arguments */
1853 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1854 return ERROR_INVALID_ARGUMENTS;
1855
1856 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1857 return ERROR_TARGET_UNALIGNED_ACCESS;
1858
1859 /* send memory read request (command 0x1n, n: access size) */
1860 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1861 return retval;
1862
1863 /* send base address for read request */
1864 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1865 return retval;
1866
1867 /* send number of requested data words */
1868 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1869 return retval;
1870
1871 /* receive data from target (count times 32-bit words in host endianness) */
1872 buf32 = malloc(4 * count);
1873 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1874 return retval;
1875
1876 /* extract data from host-endian buffer into byte stream */
1877 for (i = 0; i < count; i++)
1878 {
1879 switch (size)
1880 {
1881 case 4:
1882 target_buffer_set_u32(target, buffer, buf32[i]);
1883 buffer += 4;
1884 break;
1885 case 2:
1886 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1887 buffer += 2;
1888 break;
1889 case 1:
1890 *buffer++ = buf32[i] & 0xff;
1891 break;
1892 default:
1893 LOG_ERROR("invalid read size");
1894 return ERROR_INVALID_ARGUMENTS;
1895 }
1896 }
1897
1898 free(buf32);
1899
1900 /* examine DCSR, to see if Sticky Abort (SA) got set */
1901 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1902 return retval;
1903 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1904 {
1905 /* clear SA bit */
1906 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1907 return retval;
1908
1909 return ERROR_TARGET_DATA_ABORT;
1910 }
1911
1912 return ERROR_OK;
1913 }
1914
/* Physical-address read.  With the MMU disabled, virtual and physical
 * addresses coincide, so the request is delegated to xscale_read_memory();
 * with the MMU enabled this is an unimplemented stub that fails loudly.
 */
static int xscale_read_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct xscale_common *xscale = target_to_xscale(target);

	/* with MMU inactive, there are only physical addresses */
	if (!xscale->armv4_5_mmu.mmu_enabled)
		return xscale_read_memory(target, address, size, count, buffer);

	/** \todo: provide a non-stub implementation of this routine. */
	LOG_ERROR("%s: %s is not implemented. Disable MMU?",
			target_name(target), __func__);
	return ERROR_FAIL;
}
1929
1930 static int xscale_write_memory(struct target *target, uint32_t address,
1931 uint32_t size, uint32_t count, uint8_t *buffer)
1932 {
1933 struct xscale_common *xscale = target_to_xscale(target);
1934 int retval;
1935
1936 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1937
1938 if (target->state != TARGET_HALTED)
1939 {
1940 LOG_WARNING("target not halted");
1941 return ERROR_TARGET_NOT_HALTED;
1942 }
1943
1944 /* sanitize arguments */
1945 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1946 return ERROR_INVALID_ARGUMENTS;
1947
1948 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1949 return ERROR_TARGET_UNALIGNED_ACCESS;
1950
1951 /* send memory write request (command 0x2n, n: access size) */
1952 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1953 return retval;
1954
1955 /* send base address for read request */
1956 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1957 return retval;
1958
1959 /* send number of requested data words to be written*/
1960 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1961 return retval;
1962
1963 /* extract data from host-endian buffer into byte stream */
1964 #if 0
1965 for (i = 0; i < count; i++)
1966 {
1967 switch (size)
1968 {
1969 case 4:
1970 value = target_buffer_get_u32(target, buffer);
1971 xscale_send_u32(target, value);
1972 buffer += 4;
1973 break;
1974 case 2:
1975 value = target_buffer_get_u16(target, buffer);
1976 xscale_send_u32(target, value);
1977 buffer += 2;
1978 break;
1979 case 1:
1980 value = *buffer;
1981 xscale_send_u32(target, value);
1982 buffer += 1;
1983 break;
1984 default:
1985 LOG_ERROR("should never get here");
1986 exit(-1);
1987 }
1988 }
1989 #endif
1990 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1991 return retval;
1992
1993 /* examine DCSR, to see if Sticky Abort (SA) got set */
1994 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1995 return retval;
1996 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1997 {
1998 /* clear SA bit */
1999 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
2000 return retval;
2001
2002 return ERROR_TARGET_DATA_ABORT;
2003 }
2004
2005 return ERROR_OK;
2006 }
2007
2008 static int xscale_write_phys_memory(struct target *target, uint32_t address,
2009 uint32_t size, uint32_t count, uint8_t *buffer)
2010 {
2011 struct xscale_common *xscale = target_to_xscale(target);
2012
2013 /* with MMU inactive, there are only physical addresses */
2014 if (!xscale->armv4_5_mmu.mmu_enabled)
2015 return xscale_read_memory(target, address, size, count, buffer);
2016
2017 /** \todo: provide a non-stub implementation of this routine. */
2018 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
2019 target_name(target), __func__);
2020 return ERROR_FAIL;
2021 }
2022
/* Bulk writes have no faster path on XScale; delegate to
 * xscale_write_memory() as 32-bit word writes.
 */
static int xscale_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}
2028
2029 static uint32_t xscale_get_ttb(struct target *target)
2030 {
2031 struct xscale_common *xscale = target_to_xscale(target);
2032 uint32_t ttb;
2033
2034 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2035 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2036
2037 return ttb;
2038 }
2039
2040 static void xscale_disable_mmu_caches(struct target *target, int mmu,
2041 int d_u_cache, int i_cache)
2042 {
2043 struct xscale_common *xscale = target_to_xscale(target);
2044 uint32_t cp15_control;
2045
2046 /* read cp15 control register */
2047 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2048 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2049
2050 if (mmu)
2051 cp15_control &= ~0x1U;
2052
2053 if (d_u_cache)
2054 {
2055 /* clean DCache */
2056 xscale_send_u32(target, 0x50);
2057 xscale_send_u32(target, xscale->cache_clean_address);
2058
2059 /* invalidate DCache */
2060 xscale_send_u32(target, 0x51);
2061
2062 cp15_control &= ~0x4U;
2063 }
2064
2065 if (i_cache)
2066 {
2067 /* invalidate ICache */
2068 xscale_send_u32(target, 0x52);
2069 cp15_control &= ~0x1000U;
2070 }
2071
2072 /* write new cp15 control register */
2073 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2074
2075 /* execute cpwait to ensure outstanding operations complete */
2076 xscale_send_u32(target, 0x53);
2077 }
2078
2079 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2080 int d_u_cache, int i_cache)
2081 {
2082 struct xscale_common *xscale = target_to_xscale(target);
2083 uint32_t cp15_control;
2084
2085 /* read cp15 control register */
2086 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2087 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2088
2089 if (mmu)
2090 cp15_control |= 0x1U;
2091
2092 if (d_u_cache)
2093 cp15_control |= 0x4U;
2094
2095 if (i_cache)
2096 cp15_control |= 0x1000U;
2097
2098 /* write new cp15 control register */
2099 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2100
2101 /* execute cpwait to ensure outstanding operations complete */
2102 xscale_send_u32(target, 0x53);
2103 }
2104
/* Install one breakpoint on the target.  Hardware breakpoints claim one of
 * the two IBCR comparators (breakpoint->set records which, 1 or 2).
 * Software breakpoints save the original instruction and patch in the
 * ARM or Thumb BKPT opcode.
 *
 * NOTE(review): running out of comparators hits a LOG_ERROR("BUG...") path
 * but still returns ERROR_OK; xscale_add_breakpoint() is expected to have
 * reserved availability so this should be unreachable.
 */
static int xscale_set_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set)
	{
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		uint32_t value = breakpoint->address | 1;	/* bit 0 enables the comparator */
		if (!xscale->ibcr0_used)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
			xscale->ibcr0_used = 1;
			breakpoint->set = 1;	/* breakpoint set on first breakpoint register */
		}
		else if (!xscale->ibcr1_used)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
			xscale->ibcr1_used = 1;
			breakpoint->set = 2;	/* breakpoint set on second breakpoint register */
		}
		else
		{
			LOG_ERROR("BUG: no hardware comparator available");
			return ERROR_OK;
		}
	}
	else if (breakpoint->type == BKPT_SOFT)
	{
		if (breakpoint->length == 4)
		{
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
			/* write the ARM BKPT instruction (xscale->arm_bkpt is host endian) */
			if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
			{
				return retval;
			}
		}
		else
		{
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
			/* write the Thumb BKPT instruction (xscale->thumb_bkpt is host endian) */
			if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
			{
				return retval;
			}
		}
		breakpoint->set = 1;
	}

	return ERROR_OK;
}
2177
2178 static int xscale_add_breakpoint(struct target *target,
2179 struct breakpoint *breakpoint)
2180 {
2181 struct xscale_common *xscale = target_to_xscale(target);
2182
2183 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2184 {
2185 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2186 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2187 }
2188
2189 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2190 {
2191 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2192 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2193 }
2194
2195 if (breakpoint->type == BKPT_HARD)
2196 {
2197 xscale->ibcr_available--;
2198 }
2199
2200 return ERROR_OK;
2201 }
2202
/* Deactivate a breakpoint that was previously set.
 *
 * Hardware breakpoints clear the IBCR comparator recorded in
 * breakpoint->set (1 = IBCR0, 2 = IBCR1); software breakpoints restore
 * the original instruction bytes saved when the breakpoint was set.
 */
static int xscale_unset_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!breakpoint->set)
	{
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		/* breakpoint->set records which instruction breakpoint
		 * control register was used when the breakpoint was set */
		if (breakpoint->set == 1)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
			xscale->ibcr0_used = 0;
		}
		else if (breakpoint->set == 2)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
			xscale->ibcr1_used = 0;
		}
		breakpoint->set = 0;
	}
	else
	{
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4)
		{
			/* 32-bit ARM instruction */
			if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
		}
		else
		{
			/* 16-bit Thumb instruction */
			if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
		}
		breakpoint->set = 0;
	}

	return ERROR_OK;
}
2257
2258 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2259 {
2260 struct xscale_common *xscale = target_to_xscale(target);
2261
2262 if (target->state != TARGET_HALTED)
2263 {
2264 LOG_WARNING("target not halted");
2265 return ERROR_TARGET_NOT_HALTED;
2266 }
2267
2268 if (breakpoint->set)
2269 {
2270 xscale_unset_breakpoint(target, breakpoint);
2271 }
2272
2273 if (breakpoint->type == BKPT_HARD)
2274 xscale->ibcr_available++;
2275
2276 return ERROR_OK;
2277 }
2278
2279 static int xscale_set_watchpoint(struct target *target,
2280 struct watchpoint *watchpoint)
2281 {
2282 struct xscale_common *xscale = target_to_xscale(target);
2283 uint8_t enable = 0;
2284 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2285 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2286
2287 if (target->state != TARGET_HALTED)
2288 {
2289 LOG_WARNING("target not halted");
2290 return ERROR_TARGET_NOT_HALTED;
2291 }
2292
2293 xscale_get_reg(dbcon);
2294
2295 switch (watchpoint->rw)
2296 {
2297 case WPT_READ:
2298 enable = 0x3;
2299 break;
2300 case WPT_ACCESS:
2301 enable = 0x2;
2302 break;
2303 case WPT_WRITE:
2304 enable = 0x1;
2305 break;
2306 default:
2307 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2308 }
2309
2310 if (!xscale->dbr0_used)
2311 {
2312 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2313 dbcon_value |= enable;
2314 xscale_set_reg_u32(dbcon, dbcon_value);
2315 watchpoint->set = 1;
2316 xscale->dbr0_used = 1;
2317 }
2318 else if (!xscale->dbr1_used)
2319 {
2320 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2321 dbcon_value |= enable << 2;
2322 xscale_set_reg_u32(dbcon, dbcon_value);
2323 watchpoint->set = 2;
2324 xscale->dbr1_used = 1;
2325 }
2326 else
2327 {
2328 LOG_ERROR("BUG: no hardware comparator available");
2329 return ERROR_OK;
2330 }
2331
2332 return ERROR_OK;
2333 }
2334
2335 static int xscale_add_watchpoint(struct target *target,
2336 struct watchpoint *watchpoint)
2337 {
2338 struct xscale_common *xscale = target_to_xscale(target);
2339
2340 if (xscale->dbr_available < 1)
2341 {
2342 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2343 }
2344
2345 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2346 {
2347 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2348 }
2349
2350 xscale->dbr_available--;
2351
2352 return ERROR_OK;
2353 }
2354
2355 static int xscale_unset_watchpoint(struct target *target,
2356 struct watchpoint *watchpoint)
2357 {
2358 struct xscale_common *xscale = target_to_xscale(target);
2359 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2360 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2361
2362 if (target->state != TARGET_HALTED)
2363 {
2364 LOG_WARNING("target not halted");
2365 return ERROR_TARGET_NOT_HALTED;
2366 }
2367
2368 if (!watchpoint->set)
2369 {
2370 LOG_WARNING("breakpoint not set");
2371 return ERROR_OK;
2372 }
2373
2374 if (watchpoint->set == 1)
2375 {
2376 dbcon_value &= ~0x3;
2377 xscale_set_reg_u32(dbcon, dbcon_value);
2378 xscale->dbr0_used = 0;
2379 }
2380 else if (watchpoint->set == 2)
2381 {
2382 dbcon_value &= ~0xc;
2383 xscale_set_reg_u32(dbcon, dbcon_value);
2384 xscale->dbr1_used = 0;
2385 }
2386 watchpoint->set = 0;
2387
2388 return ERROR_OK;
2389 }
2390
2391 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2392 {
2393 struct xscale_common *xscale = target_to_xscale(target);
2394
2395 if (target->state != TARGET_HALTED)
2396 {
2397 LOG_WARNING("target not halted");
2398 return ERROR_TARGET_NOT_HALTED;
2399 }
2400
2401 if (watchpoint->set)
2402 {
2403 xscale_unset_watchpoint(target, watchpoint);
2404 }
2405
2406 xscale->dbr_available++;
2407
2408 return ERROR_OK;
2409 }
2410
/* Read an XScale debug register into the register cache.
 *
 * DCSR, TX and RX are accessible directly via JTAG; all other debug
 * registers are transferred by the debug handler running on the target
 * (coprocessor read request, command 0x40).
 */
static int xscale_get_reg(struct reg *reg)
{
	struct xscale_reg *arch_info = reg->arch_info;
	struct target *target = arch_info->target;
	struct xscale_common *xscale = target_to_xscale(target);

	/* DCSR, TX and RX are accessible via JTAG */
	if (strcmp(reg->name, "XSCALE_DCSR") == 0)
	{
		return xscale_read_dcsr(arch_info->target);
	}
	else if (strcmp(reg->name, "XSCALE_TX") == 0)
	{
		/* 1 = consume register content */
		return xscale_read_tx(arch_info->target, 1);
	}
	else if (strcmp(reg->name, "XSCALE_RX") == 0)
	{
		/* can't read from RX register (host -> debug handler) */
		return ERROR_OK;
	}
	else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
	{
		/* can't (explicitly) read from TXRXCTRL register */
		return ERROR_OK;
	}
	else /* Other DBG registers have to be transfered by the debug handler */
	{
		/* send CP read request (command 0x40) */
		xscale_send_u32(target, 0x40);

		/* send CP register number */
		xscale_send_u32(target, arch_info->dbg_handler_number);

		/* read register value */
		/* NOTE(review): the return value of xscale_read_tx() is ignored;
		 * on a read failure reg->value would hold stale data -- confirm
		 * whether an error path is needed here */
		xscale_read_tx(target, 1);
		buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);

		reg->dirty = 0;
		reg->valid = 1;
	}

	return ERROR_OK;
}
2455
/* Write an XScale debug register from a host-side buffer.
 *
 * DCSR and RX are written directly via JTAG; TX and TXRXCTRL are not
 * host-writable; all other debug registers go through the debug handler
 * (coprocessor write request, command 0x41).
 */
static int xscale_set_reg(struct reg *reg, uint8_t* buf)
{
	struct xscale_reg *arch_info = reg->arch_info;
	struct target *target = arch_info->target;
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t value = buf_get_u32(buf, 0, 32);

	/* DCSR, TX and RX are accessible via JTAG */
	if (strcmp(reg->name, "XSCALE_DCSR") == 0)
	{
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
		return xscale_write_dcsr(arch_info->target, -1, -1);
	}
	else if (strcmp(reg->name, "XSCALE_RX") == 0)
	{
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
		return xscale_write_rx(arch_info->target);
	}
	else if (strcmp(reg->name, "XSCALE_TX") == 0)
	{
		/* can't write to TX register (debug-handler -> host) */
		return ERROR_OK;
	}
	else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
	{
		/* can't (explicitly) write to TXRXCTRL register */
		return ERROR_OK;
	}
	else /* Other DBG registers have to be transfered by the debug handler */
	{
		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, arch_info->dbg_handler_number);

		/* send CP register value */
		xscale_send_u32(target, value);
		/* keep the cached copy in sync with what was just sent */
		buf_set_u32(reg->value, 0, 32, value);
	}

	return ERROR_OK;
}
2499
/* Write the DCSR through the debug handler (software path, command 0x41)
 * instead of directly via JTAG, and keep the cached value in sync. */
static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
	struct xscale_reg *dcsr_arch_info = dcsr->arch_info;

	/* send CP write request (command 0x41) */
	xscale_send_u32(target, 0x41);

	/* send CP register number */
	xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);

	/* send CP register value */
	xscale_send_u32(target, value);
	/* mirror the new value into the register cache */
	buf_set_u32(dcsr->value, 0, 32, value);

	return ERROR_OK;
}
2518
2519 static int xscale_read_trace(struct target *target)
2520 {
2521 struct xscale_common *xscale = target_to_xscale(target);
2522 struct arm *armv4_5 = &xscale->armv4_5_common;
2523 struct xscale_trace_data **trace_data_p;
2524
2525 /* 258 words from debug handler
2526 * 256 trace buffer entries
2527 * 2 checkpoint addresses
2528 */
2529 uint32_t trace_buffer[258];
2530 int is_address[256];
2531 int i, j;
2532
2533 if (target->state != TARGET_HALTED)
2534 {
2535 LOG_WARNING("target must be stopped to read trace data");
2536 return ERROR_TARGET_NOT_HALTED;
2537 }
2538
2539 /* send read trace buffer command (command 0x61) */
2540 xscale_send_u32(target, 0x61);
2541
2542 /* receive trace buffer content */
2543 xscale_receive(target, trace_buffer, 258);
2544
2545 /* parse buffer backwards to identify address entries */
2546 for (i = 255; i >= 0; i--)
2547 {
2548 is_address[i] = 0;
2549 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2550 ((trace_buffer[i] & 0xf0) == 0xd0))
2551 {
2552 if (i >= 3)
2553 is_address[--i] = 1;
2554 if (i >= 2)
2555 is_address[--i] = 1;
2556 if (i >= 1)
2557 is_address[--i] = 1;
2558 if (i >= 0)
2559 is_address[--i] = 1;
2560 }
2561 }
2562
2563
2564 /* search first non-zero entry */
2565 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2566 ;
2567
2568 if (j == 256)
2569 {
2570 LOG_DEBUG("no trace data collected");
2571 return ERROR_XSCALE_NO_TRACE_DATA;
2572 }
2573
2574 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2575 ;
2576
2577 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2578 (*trace_data_p)->next = NULL;
2579 (*trace_data_p)->chkpt0 = trace_buffer[256];
2580 (*trace_data_p)->chkpt1 = trace_buffer[257];
2581 (*trace_data_p)->last_instruction =
2582 buf_get_u32(armv4_5->pc->value, 0, 32);
2583 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2584 (*trace_data_p)->depth = 256 - j;
2585
2586 for (i = j; i < 256; i++)
2587 {
2588 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2589 if (is_address[i])
2590 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2591 else
2592 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2593 }
2594
2595 return ERROR_OK;
2596 }
2597
/* Fetch and disassemble the instruction at xscale->trace.current_pc
 * from the loaded trace image (trace reconstruction cannot read target
 * memory, so an image of the executed code must be provided).
 */
static int xscale_read_instruction(struct target *target,
		struct arm_instruction *instruction)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int i;
	int section = -1;
	size_t size_read;
	uint32_t opcode;
	int retval;

	if (!xscale->trace.image)
		return ERROR_TRACE_IMAGE_UNAVAILABLE;

	/* search for the section the current instruction belongs to */
	for (i = 0; i < xscale->trace.image->num_sections; i++)
	{
		if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
			(xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
		{
			section = i;
			break;
		}
	}

	if (section == -1)
	{
		/* current instruction couldn't be found in the image */
		return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
	}

	if (xscale->trace.core_state == ARM_STATE_ARM)
	{
		/* 32-bit ARM instruction */
		uint8_t buf[4];
		if ((retval = image_read_section(xscale->trace.image, section,
			xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
			4, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction: %i", retval);
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u32(target, buf);
		arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
	}
	else if (xscale->trace.core_state == ARM_STATE_THUMB)
	{
		/* 16-bit Thumb instruction */
		uint8_t buf[2];
		if ((retval = image_read_section(xscale->trace.image, section,
			xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
			2, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction: %i", retval);
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u16(target, buf);
		thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
	}
	else
	{
		/* NOTE(review): exiting the whole process on an internal
		 * inconsistency is drastic -- consider returning an error */
		LOG_ERROR("BUG: unknown core state encountered");
		exit(-1);
	}

	return ERROR_OK;
}
2662
2663 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2664 int i, uint32_t *target)
2665 {
2666 /* if there are less than four entries prior to the indirect branch message
2667 * we can't extract the address */
2668 if (i < 4)
2669 {
2670 return -1;
2671 }
2672
2673 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2674 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2675
2676 return 0;
2677 }
2678
2679 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2680 {
2681 struct xscale_common *xscale = target_to_xscale(target);
2682 int next_pc_ok = 0;
2683 uint32_t next_pc = 0x0;
2684 struct xscale_trace_data *trace_data = xscale->trace.data;
2685 int retval;
2686
2687 while (trace_data)
2688 {
2689 int i, chkpt;
2690 int rollover;
2691 int branch;
2692 int exception;
2693 xscale->trace.core_state = ARM_STATE_ARM;
2694
2695 chkpt = 0;
2696 rollover = 0;
2697
2698 for (i = 0; i < trace_data->depth; i++)
2699 {
2700 next_pc_ok = 0;
2701 branch = 0;
2702 exception = 0;
2703
2704 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2705 continue;
2706
2707 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2708 {
2709 case 0: /* Exceptions */
2710 case 1:
2711 case 2:
2712 case 3:
2713 case 4:
2714 case 5:
2715 case 6:
2716 case 7:
2717 exception = (trace_data->entries[i].data & 0x70) >> 4;
2718 next_pc_ok = 1;
2719 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2720 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2721 break;
2722 case 8: /* Direct Branch */
2723 branch = 1;
2724 break;
2725 case 9: /* Indirect Branch */
2726 branch = 1;
2727 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2728 {
2729 next_pc_ok = 1;
2730 }
2731 break;
2732 case 13: /* Checkpointed Indirect Branch */
2733 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2734 {
2735 next_pc_ok = 1;
2736 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2737 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2738 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2739 }
2740 /* explicit fall-through */
2741 case 12: /* Checkpointed Direct Branch */
2742 branch = 1;
2743 if (chkpt == 0)
2744 {
2745 next_pc_ok = 1;
2746 next_pc = trace_data->chkpt0;
2747 chkpt++;
2748 }
2749 else if (chkpt == 1)
2750 {
2751 next_pc_ok = 1;
2752 next_pc = trace_data->chkpt0;
2753 chkpt++;
2754 }
2755 else
2756 {
2757 LOG_WARNING("more than two checkpointed branches encountered");
2758 }
2759 break;
2760 case 15: /* Roll-over */
2761 rollover++;
2762 continue;
2763 default: /* Reserved */
2764 command_print(cmd_ctx, "--- reserved trace message ---");
2765 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2766 return ERROR_OK;
2767 }
2768
2769 if (xscale->trace.pc_ok)
2770 {
2771 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2772 struct arm_instruction instruction;
2773
2774 if ((exception == 6) || (exception == 7))
2775 {
2776 /* IRQ or FIQ exception, no instruction executed */
2777 executed -= 1;
2778 }
2779
2780 while (executed-- >= 0)
2781 {
2782 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2783 {
2784 /* can't continue tracing with no image available */
2785 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2786 {
2787 return retval;
2788 }
2789 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2790 {
2791 /* TODO: handle incomplete images */
2792 }
2793 }
2794
2795 /* a precise abort on a load to the PC is included in the incremental
2796 * word count, other instructions causing data aborts are not included
2797 */
2798 if ((executed == 0) && (exception == 4)
2799 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2800 {
2801 if ((instruction.type == ARM_LDM)
2802 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2803 {
2804 executed--;
2805 }
2806 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2807 && (instruction.info.load_store.Rd != 15))
2808 {
2809 executed--;
2810 }
2811 }
2812
2813 /* only the last instruction executed
2814 * (the one that caused the control flow change)
2815 * could be a taken branch
2816 */
2817 if (((executed == -1) && (branch == 1)) &&
2818 (((instruction.type == ARM_B) ||
2819 (instruction.type == ARM_BL) ||
2820 (instruction.type == ARM_BLX)) &&
2821 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2822 {
2823 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2824 }
2825 else
2826 {
2827 xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2;
2828 }
2829 command_print(cmd_ctx, "%s", instruction.text);
2830 }
2831
2832 rollover = 0;
2833 }
2834
2835 if (next_pc_ok)
2836 {
2837 xscale->trace.current_pc = next_pc;
2838 xscale->trace.pc_ok = 1;
2839 }
2840 }
2841
2842 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2)
2843 {
2844 struct arm_instruction instruction;
2845 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2846 {
2847 /* can't continue tracing with no image available */
2848 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2849 {
2850 return retval;
2851 }
2852 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2853 {
2854 /* TODO: handle incomplete images */
2855 }
2856 }
2857 command_print(cmd_ctx, "%s", instruction.text);
2858 }
2859
2860 trace_data = trace_data->next;
2861 }
2862
2863 return ERROR_OK;
2864 }
2865
/* accessor callbacks used for every entry of the XScale register cache */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2870
/* Build the register caches for this target: first the generic ARM
 * cache, then a second cache holding the XScale debug registers,
 * initialized from the xscale_reg_arch_info template table. */
static void xscale_build_reg_cache(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
	/* sizeof on the template array allocates space for all entries */
	struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
	int i;
	int num_regs = ARRAY_SIZE(xscale_reg_arch_info);

	(*cache_p) = arm_build_reg_cache(target, armv4_5);

	/* chain the XScale cache behind the ARM core register cache */
	(*cache_p)->next = malloc(sizeof(struct reg_cache));
	cache_p = &(*cache_p)->next;

	/* fill in values for the xscale reg cache */
	(*cache_p)->name = "XScale registers";
	(*cache_p)->next = NULL;
	(*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
	(*cache_p)->num_regs = num_regs;

	for (i = 0; i < num_regs; i++)
	{
		(*cache_p)->reg_list[i].name = xscale_reg_list[i];
		(*cache_p)->reg_list[i].value = calloc(4, 1);
		(*cache_p)->reg_list[i].dirty = 0;
		(*cache_p)->reg_list[i].valid = 0;
		(*cache_p)->reg_list[i].size = 32;
		(*cache_p)->reg_list[i].arch_info = &arch_info[i];
		(*cache_p)->reg_list[i].type = &xscale_reg_type;
		/* copy the template and bind it to this target instance */
		arch_info[i] = xscale_reg_arch_info[i];
		arch_info[i].target = target;
	}

	xscale->reg_cache = (*cache_p);
}
2906
/* target_type init_target callback: set up the register caches once the
 * command context is available. */
static int xscale_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
2913
/* Initialize the XScale-specific state for a newly created target:
 * variant/IR-length handling, debug handler address, exception vectors,
 * breakpoint/watchpoint bookkeeping, trace state, and the ARM and MMU
 * callback hookups. */
static int xscale_init_arch_info(struct target *target,
		struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	struct arm *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specfic data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* a variant param ... */
	if (variant) {
		int ir_length = 0;

		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else if (strcmp(variant, "pxa3xx") == 0)
			ir_length = 11;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		/* the variant implies the IR length; correct a mismatching
		 * config rather than failing */
		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* PXA3xx shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	/* reset vector branches into the debug handler */
	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	for (i = 1; i <= 7; i++)
	{
		/* all other vectors branch to themselves (endless loop) until
		 * configured otherwise */
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two instruction breakpoint comparators (IBCR0/IBCR1) */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	/* two data breakpoint registers (DBR0/DBR1) */
	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
			target_name(target));

	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	/* by default only catch reset vector */
	xscale->vector_catch = 0x1;

	xscale->trace.capture_status = TRACE_IDLE;
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.buffer_enabled = 0;
	xscale->trace.buffer_fill = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	arm_init_arch_info(target, armv4_5);

	/* cache/MMU access goes through the XScale-specific helpers */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
3029
/* target_type target_create callback: allocate and initialize the
 * XScale-specific state for a new target instance. */
static int xscale_target_create(struct target *target, Jim_Interp *interp)
{
	struct xscale_common *xscale;

	/* the on-target debug handler must fit in its 2kB mini-IC slot */
	if (sizeof xscale_debug_handler - 1 > 0x800) {
		LOG_ERROR("debug_handler.bin: larger than 2kb");
		return ERROR_FAIL;
	}

	xscale = calloc(1, sizeof(*xscale));
	if (!xscale)
		return ERROR_FAIL;

	return xscale_init_arch_info(target, xscale, target->tap,
			target->variant);
}
3046
/* "xscale debug_handler <target#> <address>" -- set the address where
 * the on-target debug handler will be installed. */
COMMAND_HANDLER(xscale_handle_debug_handler_command)
{
	struct target *target = NULL;
	struct xscale_common *xscale;
	int retval;
	uint32_t handler_address;

	if (CMD_ARGC < 2)
	{
		LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
		return ERROR_OK;
	}

	if ((target = get_target(CMD_ARGV[0])) == NULL)
	{
		LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
		return ERROR_FAIL;
	}

	xscale = target_to_xscale(target);
	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);

	/* the handler must live in one of the two mappable ranges, leaving
	 * 2kB (0x800) of room below the top of the range */
	if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
		((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
	{
		xscale->handler_address = handler_address;
	}
	else
	{
		LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
		return ERROR_FAIL;
	}

	return ERROR_OK;
}
3086
3087 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3088 {
3089 struct target *target = NULL;
3090 struct xscale_common *xscale;
3091 int retval;
3092 uint32_t cache_clean_address;
3093
3094 if (CMD_ARGC < 2)
3095 {
3096 return ERROR_COMMAND_SYNTAX_ERROR;
3097 }
3098
3099 target = get_target(CMD_ARGV[0]);
3100 if (target == NULL)
3101 {
3102 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3103 return ERROR_FAIL;
3104 }
3105 xscale = target_to_xscale(target);
3106 retval = xscale_verify_pointer(CMD_CTX, xscale);
3107 if (retval != ERROR_OK)
3108 return retval;
3109
3110 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3111
3112 if (cache_clean_address & 0xffff)
3113 {
3114 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3115 }
3116 else
3117 {
3118 xscale->cache_clean_address = cache_clean_address;
3119 }
3120
3121 return ERROR_OK;
3122 }
3123
3124 COMMAND_HANDLER(xscale_handle_cache_info_command)
3125 {
3126 struct target *target = get_current_target(CMD_CTX);
3127 struct xscale_common *xscale = target_to_xscale(target);
3128 int retval;
3129
3130 retval = xscale_verify_pointer(CMD_CTX, xscale);
3131 if (retval != ERROR_OK)
3132 return retval;
3133
3134 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3135 }
3136
3137 static int xscale_virt2phys(struct target *target,
3138 uint32_t virtual, uint32_t *physical)
3139 {
3140 struct xscale_common *xscale = target_to_xscale(target);
3141 int type;
3142 uint32_t cb;
3143 int domain;
3144 uint32_t ap;
3145
3146 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3147 LOG_ERROR(xscale_not);
3148 return ERROR_TARGET_INVALID;
3149 }
3150
3151 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3152 if (type == -1)
3153 {
3154 return ret;
3155 }
3156 *physical = ret;
3157 return ERROR_OK;
3158 }
3159
3160 static int xscale_mmu(struct target *target, int *enabled)
3161 {
3162 struct xscale_common *xscale = target_to_xscale(target);
3163
3164 if (target->state != TARGET_HALTED)
3165 {
3166 LOG_ERROR("Target not halted");
3167 return ERROR_TARGET_INVALID;
3168 }
3169 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3170 return ERROR_OK;
3171 }
3172
/* "xscale mmu [enable|disable]" -- toggle or report the MMU state. */
COMMAND_HANDLER(xscale_handle_mmu_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	/* with an argument, change the state; without, just report it */
	if (CMD_ARGC >= 1)
	{
		bool enable;
		COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
		if (enable)
			xscale_enable_mmu_caches(target, 1, 0, 0);
		else
			xscale_disable_mmu_caches(target, 1, 0, 0);
		xscale->armv4_5_mmu.mmu_enabled = enable;
	}

	command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");

	return ERROR_OK;
}
3204
3205 COMMAND_HANDLER(xscale_handle_idcache_command)
3206 {
3207 struct target *target = get_current_target(CMD_CTX);
3208 struct xscale_common *xscale = target_to_xscale(target);
3209
3210 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3211 if (retval != ERROR_OK)
3212 return retval;
3213
3214 if (target->state != TARGET_HALTED)
3215 {
3216 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3217 return ERROR_OK;
3218 }
3219
3220 bool icache;
3221 COMMAND_PARSE_BOOL(CMD_NAME, icache, "icache", "dcache");
3222
3223 if (CMD_ARGC >= 1)
3224 {
3225 bool enable;
3226 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3227 if (enable)
3228 xscale_enable_mmu_caches(target, 1, 0, 0);
3229 else
3230 xscale_disable_mmu_caches(target, 1, 0, 0);
3231 if (icache)
3232 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3233 else
3234 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3235 }
3236
3237 bool enabled = icache ?
3238 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3239 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3240 const char *msg = enabled ? "enabled" : "disabled";
3241 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3242
3243 return ERROR_OK;
3244 }
3245
/* "xscale vector_catch [mask]" -- set or print the vector catch mask,
 * which occupies bits [23:16] (the trap bits) of the DCSR. */
COMMAND_HANDLER(xscale_handle_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC < 1)
	{
		command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
	}
	else
	{
		COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
		/* place the mask into DCSR bits [23:16] and write it back */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
		xscale_write_dcsr(target, -1, -1);
	}

	command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);

	return ERROR_OK;
}
3271
3272
/* "xscale vector_table [<high|low> <index> <code>]" -- configure a
 * user-supplied static exception vector entry, or print the currently
 * configured ones when invoked without arguments.  Index 0 (reset) is
 * reserved for the debug handler and cannot be overridden. */
COMMAND_HANDLER(xscale_handle_vector_table_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int err = 0;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC == 0) /* print current settings */
	{
		int idx;

		command_print(CMD_CTX, "active user-set static vectors:");
		for (idx = 1; idx < 8; idx++)
			if (xscale->static_low_vectors_set & (1 << idx))
				command_print(CMD_CTX, "low  %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
		for (idx = 1; idx < 8; idx++)
			if (xscale->static_high_vectors_set & (1 << idx))
				command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
		return ERROR_OK;
	}

	if (CMD_ARGC != 3)
		err = 1;
	else
	{
		int idx;
		COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
		uint32_t vec;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);

		/* only vectors 1..7 may be user-configured */
		if (idx < 1 || idx >= 8)
			err = 1;

		if (!err && strcmp(CMD_ARGV[0], "low") == 0)
		{
			xscale->static_low_vectors_set |= (1<<idx);
			xscale->static_low_vectors[idx] = vec;
		}
		else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
		{
			xscale->static_high_vectors_set |= (1<<idx);
			xscale->static_high_vectors[idx] = vec;
		}
		else
			err = 1;
	}

	if (err)
		command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");

	return ERROR_OK;
}
3329
3330
3331 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3332 {
3333 struct target *target = get_current_target(CMD_CTX);
3334 struct xscale_common *xscale = target_to_xscale(target);
3335 struct arm *armv4_5 = &xscale->armv4_5_common;
3336 uint32_t dcsr_value;
3337 int retval;
3338
3339 retval = xscale_verify_pointer(CMD_CTX, xscale);
3340 if (retval != ERROR_OK)
3341 return retval;
3342
3343 if (target->state != TARGET_HALTED)
3344 {
3345 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3346 return ERROR_OK;
3347 }
3348
3349 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3350 {
3351 struct xscale_trace_data *td, *next_td;
3352 xscale->trace.buffer_enabled = 1;
3353
3354 /* free old trace data */
3355 td = xscale->trace.data;
3356 while (td)
3357 {
3358 next_td = td->next;
3359
3360 if (td->entries)
3361 free(td->entries);
3362 free(td);
3363 td = next_td;
3364 }
3365 xscale->trace.data = NULL;
3366 }
3367 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3368 {
3369 xscale->trace.buffer_enabled = 0;
3370 }
3371
3372 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3373 {
3374 uint32_t fill = 1;
3375 if (CMD_ARGC >= 3)
3376 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3377 xscale->trace.buffer_fill = fill;
3378 }
3379 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3380 {
3381 xscale->trace.buffer_fill = -1;
3382 }
3383
3384 if (xscale->trace.buffer_enabled)
3385 {
3386 /* if we enable the trace buffer in fill-once
3387 * mode we know the address of the first instruction */
3388 xscale->trace.pc_ok = 1;
3389 xscale->trace.current_pc =
3390 buf_get_u32(armv4_5->pc->value, 0, 32);
3391 }
3392 else
3393 {
3394 /* otherwise the address is unknown, and we have no known good PC */
3395 xscale->trace.pc_ok = 0;
3396 }
3397
3398 command_print(CMD_CTX, "trace buffer %s (%s)",
3399 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3400 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3401
3402 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3403 if (xscale->trace.buffer_fill >= 0)
3404 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3405 else
3406 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3407
3408 return ERROR_OK;
3409 }
3410
3411 COMMAND_HANDLER(xscale_handle_trace_image_command)
3412 {
3413 struct target *target = get_current_target(CMD_CTX);
3414 struct xscale_common *xscale = target_to_xscale(target);
3415 int retval;
3416
3417 if (CMD_ARGC < 1)
3418 {
3419 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3420 return ERROR_OK;
3421 }
3422
3423 retval = xscale_verify_pointer(CMD_CTX, xscale);
3424 if (retval != ERROR_OK)
3425 return retval;
3426
3427 if (xscale->trace.image)
3428 {
3429 image_close(xscale->trace.image);
3430 free(xscale->trace.image);
3431 command_print(CMD_CTX, "previously loaded image found and closed");
3432 }
3433
3434 xscale->trace.image = malloc(sizeof(struct image));
3435 xscale->trace.image->base_address_set = 0;
3436 xscale->trace.image->start_address_set = 0;
3437
3438 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3439 if (CMD_ARGC >= 2)
3440 {
3441 xscale->trace.image->base_address_set = 1;
3442 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3443 }
3444 else
3445 {
3446 xscale->trace.image->base_address_set = 0;
3447 }
3448
3449 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3450 {
3451 free(xscale->trace.image);
3452 xscale->trace.image = NULL;
3453 return ERROR_OK;
3454 }
3455
3456 return ERROR_OK;
3457 }
3458
3459 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3460 {
3461 struct target *target = get_current_target(CMD_CTX);
3462 struct xscale_common *xscale = target_to_xscale(target);
3463 struct xscale_trace_data *trace_data;
3464 struct fileio file;
3465 int retval;
3466
3467 retval = xscale_verify_pointer(CMD_CTX, xscale);
3468 if (retval != ERROR_OK)
3469 return retval;
3470
3471 if (target->state != TARGET_HALTED)
3472 {
3473 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3474 return ERROR_OK;
3475 }
3476
3477 if (CMD_ARGC < 1)
3478 {
3479 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3480 return ERROR_OK;
3481 }
3482
3483 trace_data = xscale->trace.data;
3484
3485 if (!trace_data)
3486 {
3487 command_print(CMD_CTX, "no trace data collected");
3488 return ERROR_OK;
3489 }
3490
3491 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3492 {
3493 return ERROR_OK;
3494 }
3495
3496 while (trace_data)
3497 {
3498 int i;
3499
3500 fileio_write_u32(&file, trace_data->chkpt0);
3501 fileio_write_u32(&file, trace_data->chkpt1);
3502 fileio_write_u32(&file, trace_data->last_instruction);
3503 fileio_write_u32(&file, trace_data->depth);
3504
3505 for (i = 0; i < trace_data->depth; i++)
3506 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3507
3508 trace_data = trace_data->next;
3509 }
3510
3511 fileio_close(&file);
3512
3513 return ERROR_OK;
3514 }
3515
3516 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3517 {
3518 struct target *target = get_current_target(CMD_CTX);
3519 struct xscale_common *xscale = target_to_xscale(target);
3520 int retval;
3521
3522 retval = xscale_verify_pointer(CMD_CTX, xscale);
3523 if (retval != ERROR_OK)
3524 return retval;
3525
3526 xscale_analyze_trace(target, CMD_CTX);
3527
3528 return ERROR_OK;
3529 }
3530
3531 COMMAND_HANDLER(xscale_handle_cp15)
3532 {
3533 struct target *target = get_current_target(CMD_CTX);
3534 struct xscale_common *xscale = target_to_xscale(target);
3535 int retval;
3536
3537 retval = xscale_verify_pointer(CMD_CTX, xscale);
3538 if (retval != ERROR_OK)
3539 return retval;
3540
3541 if (target->state != TARGET_HALTED)
3542 {
3543 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3544 return ERROR_OK;
3545 }
3546 uint32_t reg_no = 0;
3547 struct reg *reg = NULL;
3548 if (CMD_ARGC > 0)
3549 {
3550 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3551 /*translate from xscale cp15 register no to openocd register*/
3552 switch (reg_no)
3553 {
3554 case 0:
3555 reg_no = XSCALE_MAINID;
3556 break;
3557 case 1:
3558 reg_no = XSCALE_CTRL;
3559 break;
3560 case 2:
3561 reg_no = XSCALE_TTB;
3562 break;
3563 case 3:
3564 reg_no = XSCALE_DAC;
3565 break;
3566 case 5:
3567 reg_no = XSCALE_FSR;
3568 break;
3569 case 6:
3570 reg_no = XSCALE_FAR;
3571 break;
3572 case 13:
3573 reg_no = XSCALE_PID;
3574 break;
3575 case 15:
3576 reg_no = XSCALE_CPACCESS;
3577 break;
3578 default:
3579 command_print(CMD_CTX, "invalid register number");
3580 return ERROR_INVALID_ARGUMENTS;
3581 }
3582 reg = &xscale->reg_cache->reg_list[reg_no];
3583
3584 }
3585 if (CMD_ARGC == 1)
3586 {
3587 uint32_t value;
3588
3589 /* read cp15 control register */
3590 xscale_get_reg(reg);
3591 value = buf_get_u32(reg->value, 0, 32);
3592 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3593 }
3594 else if (CMD_ARGC == 2)
3595 {
3596 uint32_t value;
3597 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3598
3599 /* send CP write request (command 0x41) */
3600 xscale_send_u32(target, 0x41);
3601
3602 /* send CP register number */
3603 xscale_send_u32(target, reg_no);
3604
3605 /* send CP register value */
3606 xscale_send_u32(target, value);
3607
3608 /* execute cpwait to ensure outstanding operations complete */
3609 xscale_send_u32(target, 0x53);
3610 }
3611 else
3612 {
3613 command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
3614 }
3615
3616 return ERROR_OK;
3617 }
3618
/* "xscale" subcommands that operate on a live target (COMMAND_EXEC);
 * most of them additionally require the target to be halted. */
static const struct command_registration xscale_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = xscale_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about CPU caches",
	},
	{
		.name = "mmu",
		.handler = xscale_handle_mmu_command,
		.mode = COMMAND_EXEC,
		.help = "enable or disable the MMU",
		.usage = "['enable'|'disable']",
	},
	/* NOTE: icache and dcache share one handler; it distinguishes the
	 * two caches by inspecting CMD_NAME */
	{
		.name = "icache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display ICache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "dcache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display DCache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "vector_catch",
		.handler = xscale_handle_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "set or display 8-bit mask of vectors "
			"that should trigger debug entry",
		.usage = "[mask]",
	},
	{
		.name = "vector_table",
		.handler = xscale_handle_vector_table_command,
		.mode = COMMAND_EXEC,
		.help = "set vector table entry in mini-ICache, "
			"or display current tables",
		.usage = "[('high'|'low') index code]",
	},
	{
		.name = "trace_buffer",
		.handler = xscale_handle_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "display trace buffer status, enable or disable "
			"tracing, and optionally reconfigure trace mode",
		.usage = "['enable'|'disable' ['fill' number|'wrap']]",
	},
	{
		.name = "dump_trace",
		.handler = xscale_handle_dump_trace_command,
		.mode = COMMAND_EXEC,
		.help = "dump content of trace buffer to file",
		.usage = "filename",
	},
	{
		.name = "analyze_trace",
		.handler = xscale_handle_analyze_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "analyze content of trace buffer",
		.usage = "",
	},
	{
		.name = "trace_image",
		.handler = xscale_handle_trace_image_command,
		.mode = COMMAND_EXEC,
		.help = "load image from file to address (default 0)",
		.usage = "filename [offset [filetype]]",
	},
	{
		.name = "cp15",
		.handler = xscale_handle_cp15,
		.mode = COMMAND_EXEC,
		.help = "Read or write coprocessor 15 register.",
		.usage = "register [value]",
	},
	COMMAND_REGISTRATION_DONE
};
/* "xscale" subcommands usable in any mode (COMMAND_ANY), i.e. also from
 * config scripts before the target is examined; chains in the EXEC-mode
 * subcommands declared above. */
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "target address",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "address",
	},
	{
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for XScale targets: the generic ARM
 * commands plus the "xscale" command group. */
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3735
/* Target-type vtable binding the XScale implementation into the generic
 * OpenOCD target layer. */
struct target_type xscale_target =
{
	.name = "xscale",

	/* run-state management */
	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	/* reset handling; no soft-reset-halt support */
	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* memory access: separate virtual and physical entry points */
	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,
	.bulk_write_memory = xscale_bulk_write_memory,

	/* generic ARM algorithm-based helpers */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	/* MMU-aware address translation */
	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account. Then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)