target: remove some more duplicate includes
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include "time_support.h"
37 #include "register.h"
38 #include "image.h"
39
40
41 /*
42 * Important XScale documents available as of October 2009 include:
43 *
44 * Intel XScale® Core Developer’s Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
48 *
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
56 *
57 * Chip-specific microarchitecture documents may also be useful.
58 */
59
60
61 /* forward declarations */
62 static int xscale_resume(struct target *, int current,
63 uint32_t address, int handle_breakpoints, int debug_execution);
64 static int xscale_debug_entry(struct target *);
65 static int xscale_restore_context(struct target *);
66 static int xscale_get_reg(struct reg *reg);
67 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
68 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
69 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
70 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_read_trace(struct target *);
72
73
74 /* This XScale "debug handler" is loaded into the processor's
75 * mini-ICache, which is 2K of code writable only via JTAG.
76 *
77 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
78 * binary files cleanly. It's string oriented, and terminates them
79 * with a NUL character. Better would be to generate the constants
80 * and let other code decide names, scoping, and other housekeeping.
81 */
82 static /* unsigned const char xscale_debug_handler[] = ... */
83 #include "xscale_debug.h"
84
85 static char *const xscale_reg_list[] =
86 {
87 "XSCALE_MAINID", /* 0 */
88 "XSCALE_CACHETYPE",
89 "XSCALE_CTRL",
90 "XSCALE_AUXCTRL",
91 "XSCALE_TTB",
92 "XSCALE_DAC",
93 "XSCALE_FSR",
94 "XSCALE_FAR",
95 "XSCALE_PID",
96 "XSCALE_CPACCESS",
97 "XSCALE_IBCR0", /* 10 */
98 "XSCALE_IBCR1",
99 "XSCALE_DBR0",
100 "XSCALE_DBR1",
101 "XSCALE_DBCON",
102 "XSCALE_TBREG",
103 "XSCALE_CHKPT0",
104 "XSCALE_CHKPT1",
105 "XSCALE_DCSR",
106 "XSCALE_TX",
107 "XSCALE_RX", /* 20 */
108 "XSCALE_TXRXCTRL",
109 };
110
111 static const struct xscale_reg xscale_reg_arch_info[] =
112 {
113 {XSCALE_MAINID, NULL},
114 {XSCALE_CACHETYPE, NULL},
115 {XSCALE_CTRL, NULL},
116 {XSCALE_AUXCTRL, NULL},
117 {XSCALE_TTB, NULL},
118 {XSCALE_DAC, NULL},
119 {XSCALE_FSR, NULL},
120 {XSCALE_FAR, NULL},
121 {XSCALE_PID, NULL},
122 {XSCALE_CPACCESS, NULL},
123 {XSCALE_IBCR0, NULL},
124 {XSCALE_IBCR1, NULL},
125 {XSCALE_DBR0, NULL},
126 {XSCALE_DBR1, NULL},
127 {XSCALE_DBCON, NULL},
128 {XSCALE_TBREG, NULL},
129 {XSCALE_CHKPT0, NULL},
130 {XSCALE_CHKPT1, NULL},
131 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
132 {-1, NULL}, /* TX accessed via JTAG */
133 {-1, NULL}, /* RX accessed via JTAG */
134 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
135 };
136
137 static int xscale_reg_arch_type = -1;
138
139 /* convenience wrapper to access XScale specific registers */
140 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
141 {
142 uint8_t buf[4];
143
144 buf_set_u32(buf, 0, 32, value);
145
146 return xscale_set_reg(reg, buf);
147 }
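/* Usage sketch (mirrors the call in xscale_enable_single_step() below):
 *
 *     xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], next_pc | 0x1);
 *
 * buf_set_u32() packs the 32-bit value into a scratch buffer, which is then
 * handed to xscale_set_reg().
 */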
148
149 static const char xscale_not[] = "target is not an XScale";
150
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
153 {
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
157 }
158 return ERROR_OK;
159 }
160
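/* load a new instruction into the TAP's IR; the IR scan is skipped when the
 * requested instruction is already selected */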
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
162 {
163 if (tap == NULL)
164 return ERROR_FAIL;
165
166 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
167 {
168 struct scan_field field;
169 uint8_t scratch[4];
170
171 memset(&field, 0, sizeof field);
172 field.tap = tap;
173 field.num_bits = tap->ir_length;
174 field.out_value = scratch;
175 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
176
177 jtag_add_ir_scan(1, &field, jtag_get_end_state());
178 }
179
180 return ERROR_OK;
181 }
182
183 static int xscale_read_dcsr(struct target *target)
184 {
185 struct xscale_common *xscale = target_to_xscale(target);
186 int retval;
187 struct scan_field fields[3];
188 uint8_t field0 = 0x0;
189 uint8_t field0_check_value = 0x2;
190 uint8_t field0_check_mask = 0x7;
191 uint8_t field2 = 0x0;
192 uint8_t field2_check_value = 0x0;
193 uint8_t field2_check_mask = 0x1;
194
195 jtag_set_end_state(TAP_DRPAUSE);
196 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
197
198 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
199 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
200
201 memset(&fields, 0, sizeof fields);
202
203 fields[0].tap = target->tap;
204 fields[0].num_bits = 3;
205 fields[0].out_value = &field0;
206 uint8_t tmp;
207 fields[0].in_value = &tmp;
208
209 fields[1].tap = target->tap;
210 fields[1].num_bits = 32;
211 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
212
213 fields[2].tap = target->tap;
214 fields[2].num_bits = 1;
215 fields[2].out_value = &field2;
216 uint8_t tmp2;
217 fields[2].in_value = &tmp2;
218
219 jtag_add_dr_scan(3, fields, jtag_get_end_state());
220
221 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
222 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
223
224 if ((retval = jtag_execute_queue()) != ERROR_OK)
225 {
226 LOG_ERROR("JTAG error while reading DCSR");
227 return retval;
228 }
229
230 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
231 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
232
233 /* write the register with the value we just read
234 * (on this second pass, only the first bit of field0 is guaranteed to be 0)
235 */
236 field0_check_mask = 0x1;
237 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
238 fields[1].in_value = NULL;
239
240 jtag_set_end_state(TAP_IDLE);
241
242 jtag_add_dr_scan(3, fields, jtag_get_end_state());
243
244 /* DANGER!!! this must be here. It makes sure that the arguments
245 * to jtag_set_check_value() do not go out of scope! */
246 return jtag_execute_queue();
247 }
248
249
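/* JTAG callback: once the queue has executed, convert a just-captured 32-bit
 * scan buffer into a host-endian word, in place */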
250 static void xscale_getbuf(jtag_callback_data_t arg)
251 {
252 uint8_t *in = (uint8_t *)arg;
253 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
254 }
255
256 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
257 {
258 if (num_words == 0)
259 return ERROR_INVALID_ARGUMENTS;
260
261 int retval = ERROR_OK;
262 tap_state_t path[3];
263 struct scan_field fields[3];
264 uint8_t *field0 = malloc(num_words * 1);
265 uint8_t field0_check_value = 0x2;
266 uint8_t field0_check_mask = 0x6;
267 uint32_t *field1 = malloc(num_words * 4);
268 uint8_t field2_check_value = 0x0;
269 uint8_t field2_check_mask = 0x1;
270 int words_done = 0;
271 int words_scheduled = 0;
272 int i;
273
274 path[0] = TAP_DRSELECT;
275 path[1] = TAP_DRCAPTURE;
276 path[2] = TAP_DRSHIFT;
277
278 memset(&fields, 0, sizeof fields);
279
280 fields[0].tap = target->tap;
281 fields[0].num_bits = 3;
282 fields[0].check_value = &field0_check_value;
283 fields[0].check_mask = &field0_check_mask;
284
285 fields[1].tap = target->tap;
286 fields[1].num_bits = 32;
287
288 fields[2].tap = target->tap;
289 fields[2].num_bits = 1;
290 fields[2].check_value = &field2_check_value;
291 fields[2].check_mask = &field2_check_mask;
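/* bit 0 of the 3-bit status field (field0) is the DBGTX data-valid flag; the
 * results loop below discards scans where it reads back clear */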
292
293 jtag_set_end_state(TAP_IDLE);
294 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
295 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
296
297 /* repeat until all words have been collected */
298 int attempts = 0;
299 while (words_done < num_words)
300 {
301 /* schedule reads */
302 words_scheduled = 0;
303 for (i = words_done; i < num_words; i++)
304 {
305 fields[0].in_value = &field0[i];
306
307 jtag_add_pathmove(3, path);
308
309 fields[1].in_value = (uint8_t *)(field1 + i);
310
311 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
312
313 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
314
315 words_scheduled++;
316 }
317
318 if ((retval = jtag_execute_queue()) != ERROR_OK)
319 {
320 LOG_ERROR("JTAG error while receiving data from debug handler");
321 break;
322 }
323
324 /* examine results */
325 for (i = words_done; i < num_words; i++)
326 {
327 if (!(field0[0] & 1))
328 {
329 /* move backwards if necessary */
330 int j;
331 for (j = i; j < num_words - 1; j++)
332 {
333 field0[j] = field0[j + 1];
334 field1[j] = field1[j + 1];
335 }
336 words_scheduled--;
337 }
338 }
339 if (words_scheduled == 0)
340 {
341 if (attempts++==1000)
342 {
343 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
344 retval = ERROR_TARGET_TIMEOUT;
345 break;
346 }
347 }
348
349 words_done += words_scheduled;
350 }
351
352 for (i = 0; i < num_words; i++)
353 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
354
free(field0);
355 free(field1);
356
357 return retval;
358 }
359
360 static int xscale_read_tx(struct target *target, int consume)
361 {
362 struct xscale_common *xscale = target_to_xscale(target);
363 tap_state_t path[3];
364 tap_state_t noconsume_path[6];
365 int retval;
366 struct timeval timeout, now;
367 struct scan_field fields[3];
368 uint8_t field0_in = 0x0;
369 uint8_t field0_check_value = 0x2;
370 uint8_t field0_check_mask = 0x6;
371 uint8_t field2_check_value = 0x0;
372 uint8_t field2_check_mask = 0x1;
373
374 jtag_set_end_state(TAP_IDLE);
375
376 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
377
378 path[0] = TAP_DRSELECT;
379 path[1] = TAP_DRCAPTURE;
380 path[2] = TAP_DRSHIFT;
381
382 noconsume_path[0] = TAP_DRSELECT;
383 noconsume_path[1] = TAP_DRCAPTURE;
384 noconsume_path[2] = TAP_DREXIT1;
385 noconsume_path[3] = TAP_DRPAUSE;
386 noconsume_path[4] = TAP_DREXIT2;
387 noconsume_path[5] = TAP_DRSHIFT;
388
389 memset(&fields, 0, sizeof fields);
390
391 fields[0].tap = target->tap;
392 fields[0].num_bits = 3;
393 fields[0].in_value = &field0_in;
394
395 fields[1].tap = target->tap;
396 fields[1].num_bits = 32;
397 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
398
399 fields[2].tap = target->tap;
400 fields[2].num_bits = 1;
401 uint8_t tmp;
402 fields[2].in_value = &tmp;
403
404 gettimeofday(&timeout, NULL);
405 timeval_add_time(&timeout, 1, 0);
406
407 for (;;)
408 {
409 /* if we want to consume the register content (i.e. clear TX_READY),
410 * we have to go straight from Capture-DR to Shift-DR;
411 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
412 */
413 if (consume)
414 jtag_add_pathmove(3, path);
415 else
416 {
417 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
418 }
419
420 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
421
422 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
423 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
424
425 if ((retval = jtag_execute_queue()) != ERROR_OK)
426 {
427 LOG_ERROR("JTAG error while reading TX");
428 return ERROR_TARGET_TIMEOUT;
429 }
430
431 gettimeofday(&now, NULL);
432 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
433 {
434 LOG_ERROR("time out reading TX register");
435 return ERROR_TARGET_TIMEOUT;
436 }
437 if (!((!(field0_in & 1)) && consume))
438 {
439 goto done;
440 }
441 if (debug_level >= 3)
442 {
443 LOG_DEBUG("waiting 100ms");
444 alive_sleep(100); /* avoid flooding the logs */
445 } else
446 {
447 keep_alive();
448 }
449 }
450 done:
451
452 if (!(field0_in & 1))
453 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
454
455 return ERROR_OK;
456 }
457
458 static int xscale_write_rx(struct target *target)
459 {
460 struct xscale_common *xscale = target_to_xscale(target);
461 int retval;
462 struct timeval timeout, now;
463 struct scan_field fields[3];
464 uint8_t field0_out = 0x0;
465 uint8_t field0_in = 0x0;
466 uint8_t field0_check_value = 0x2;
467 uint8_t field0_check_mask = 0x6;
468 uint8_t field2 = 0x0;
469 uint8_t field2_check_value = 0x0;
470 uint8_t field2_check_mask = 0x1;
471
472 jtag_set_end_state(TAP_IDLE);
473
474 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
475
476 memset(&fields, 0, sizeof fields);
477
478 fields[0].tap = target->tap;
479 fields[0].num_bits = 3;
480 fields[0].out_value = &field0_out;
481 fields[0].in_value = &field0_in;
482
483 fields[1].tap = target->tap;
484 fields[1].num_bits = 32;
485 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
486
487 fields[2].tap = target->tap;
488 fields[2].num_bits = 1;
489 fields[2].out_value = &field2;
490 uint8_t tmp;
491 fields[2].in_value = &tmp;
492
493 gettimeofday(&timeout, NULL);
494 timeval_add_time(&timeout, 1, 0);
495
496 /* poll until rx_read is low */
497 LOG_DEBUG("polling RX");
498 for (;;)
499 {
500 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
501
502 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
503 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
504
505 if ((retval = jtag_execute_queue()) != ERROR_OK)
506 {
507 LOG_ERROR("JTAG error while writing RX");
508 return retval;
509 }
510
511 gettimeofday(&now, NULL);
512 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
513 {
514 LOG_ERROR("time out writing RX register");
515 return ERROR_TARGET_TIMEOUT;
516 }
517 if (!(field0_in & 1))
518 goto done;
519 if (debug_level >= 3)
520 {
521 LOG_DEBUG("waiting 100ms");
522 alive_sleep(100); /* avoid flooding the logs */
523 } else
524 {
525 keep_alive();
526 }
527 }
528 done:
529
530 /* set rx_valid */
531 field2 = 0x1;
532 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
533
534 if ((retval = jtag_execute_queue()) != ERROR_OK)
535 {
536 LOG_ERROR("JTAG error while writing RX");
537 return retval;
538 }
539
540 return ERROR_OK;
541 }
542
543 /* send count elements of size bytes each to the debug handler */
544 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
545 {
546 uint32_t t[3];
547 int bits[3];
548 int retval;
549 int done_count = 0;
550
551 jtag_set_end_state(TAP_IDLE);
552
553 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
554
555 bits[0]=3;
556 t[0]=0;
557 bits[1]=32;
558 t[2]=1;
559 bits[2]=1;
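/* same DR layout as xscale_write_rx(): 3 status bits (sent as 0), the 32-bit
 * data word, and a trailing bit set to 1 so each word is marked valid */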
560 int endianness = target->endianness;
561 while (done_count++ < count)
562 {
563 switch (size)
564 {
565 case 4:
566 if (endianness == TARGET_LITTLE_ENDIAN)
567 {
568 t[1]=le_to_h_u32(buffer);
569 } else
570 {
571 t[1]=be_to_h_u32(buffer);
572 }
573 break;
574 case 2:
575 if (endianness == TARGET_LITTLE_ENDIAN)
576 {
577 t[1]=le_to_h_u16(buffer);
578 } else
579 {
580 t[1]=be_to_h_u16(buffer);
581 }
582 break;
583 case 1:
584 t[1]=buffer[0];
585 break;
586 default:
587 LOG_ERROR("BUG: size neither 4, 2 nor 1");
588 return ERROR_INVALID_ARGUMENTS;
589 }
590 jtag_add_dr_out(target->tap,
591 3,
592 bits,
593 t,
594 jtag_set_end_state(TAP_IDLE));
595 buffer += size;
596 }
597
598 if ((retval = jtag_execute_queue()) != ERROR_OK)
599 {
600 LOG_ERROR("JTAG error while sending data to debug handler");
601 return retval;
602 }
603
604 return ERROR_OK;
605 }
606
607 static int xscale_send_u32(struct target *target, uint32_t value)
608 {
609 struct xscale_common *xscale = target_to_xscale(target);
610
611 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
612 return xscale_write_rx(target);
613 }
614
615 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
616 {
617 struct xscale_common *xscale = target_to_xscale(target);
618 int retval;
619 struct scan_field fields[3];
620 uint8_t field0 = 0x0;
621 uint8_t field0_check_value = 0x2;
622 uint8_t field0_check_mask = 0x7;
623 uint8_t field2 = 0x0;
624 uint8_t field2_check_value = 0x0;
625 uint8_t field2_check_mask = 0x1;
626
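/* a value of -1 leaves the cached hold_rst / external_debug_break setting unchanged */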
627 if (hold_rst != -1)
628 xscale->hold_rst = hold_rst;
629
630 if (ext_dbg_brk != -1)
631 xscale->external_debug_break = ext_dbg_brk;
632
633 jtag_set_end_state(TAP_IDLE);
634 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
635
636 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
637 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
638
639 memset(&fields, 0, sizeof fields);
640
641 fields[0].tap = target->tap;
642 fields[0].num_bits = 3;
643 fields[0].out_value = &field0;
644 uint8_t tmp;
645 fields[0].in_value = &tmp;
646
647 fields[1].tap = target->tap;
648 fields[1].num_bits = 32;
649 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
650
651 fields[2].tap = target->tap;
652 fields[2].num_bits = 1;
653 fields[2].out_value = &field2;
654 uint8_t tmp2;
655 fields[2].in_value = &tmp2;
656
657 jtag_add_dr_scan(3, fields, jtag_get_end_state());
658
659 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
660 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
661
662 if ((retval = jtag_execute_queue()) != ERROR_OK)
663 {
664 LOG_ERROR("JTAG error while writing DCSR");
665 return retval;
666 }
667
668 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
669 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
670
671 return ERROR_OK;
672 }
673
674 /* parity of a 32-bit word: 0 if the number of set bits is even, 1 if odd */
675 static unsigned int parity (unsigned int v)
676 {
677 // unsigned int ov = v;
678 v ^= v >> 16;
679 v ^= v >> 8;
680 v ^= v >> 4;
681 v &= 0xf;
682 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
683 return (0x6996 >> v) & 1;
684 }
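/* Worked example (illustrative only): for v = 0x00000007 the three XOR folds
 * make the parity of the low nibble equal the parity of all 32 bits (here 0x7),
 * and 0x6996 is a 16-entry nibble-parity table packed into one constant:
 * (0x6996 >> 0x7) & 1 == 1, matching the odd number (three) of set bits. */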
685
686 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
687 {
688 uint8_t packet[4];
689 uint8_t cmd;
690 int word;
691 struct scan_field fields[2];
692
693 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
694
695 /* LDIC into IR */
696 jtag_set_end_state(TAP_IDLE);
697 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
698
699 /* CMD is b011 to load a cacheline into the Mini ICache.
700 * Loading into the main ICache is deprecated, and unused.
701 * It's followed by three zero bits, and 27 address bits.
702 */
703 buf_set_u32(&cmd, 0, 6, 0x3);
704
705 /* virtual address of desired cache line */
706 buf_set_u32(packet, 0, 27, va >> 5);
707
708 memset(&fields, 0, sizeof fields);
709
710 fields[0].tap = target->tap;
711 fields[0].num_bits = 6;
712 fields[0].out_value = &cmd;
713
714 fields[1].tap = target->tap;
715 fields[1].num_bits = 27;
716 fields[1].out_value = packet;
717
718 jtag_add_dr_scan(2, fields, jtag_get_end_state());
719
720 /* rest of packet is a cacheline: 8 instructions, with parity */
721 fields[0].num_bits = 32;
722 fields[0].out_value = packet;
723
724 fields[1].num_bits = 1;
725 fields[1].out_value = &cmd;
726
727 for (word = 0; word < 8; word++)
728 {
729 buf_set_u32(packet, 0, 32, buffer[word]);
730
731 uint32_t value;
732 memcpy(&value, packet, sizeof(uint32_t));
733 cmd = parity(value);
734
735 jtag_add_dr_scan(2, fields, jtag_get_end_state());
736 }
737
738 return jtag_execute_queue();
739 }
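/* Illustrative LDIC packet layout, matching the scans built above, for
 * va = 0xffff0000 (the high vectors): first a 6-bit CMD field (b011 plus three
 * zero bits, value 0x3) together with the 27-bit line address va >> 5 = 0x07fff800,
 * then eight scans of one 32-bit instruction word followed by its parity bit. */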
740
741 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
742 {
743 uint8_t packet[4];
744 uint8_t cmd;
745 struct scan_field fields[2];
746
747 jtag_set_end_state(TAP_IDLE);
748 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
749
750 /* CMD is b000 to invalidate an IC line; bits [6:4] are b000 */
751 buf_set_u32(&cmd, 0, 6, 0x0);
752
753 /* virtual address of desired cache line */
754 buf_set_u32(packet, 0, 27, va >> 5);
755
756 memset(&fields, 0, sizeof fields);
757
758 fields[0].tap = target->tap;
759 fields[0].num_bits = 6;
760 fields[0].out_value = &cmd;
761
762 fields[1].tap = target->tap;
763 fields[1].num_bits = 27;
764 fields[1].out_value = packet;
765
766 jtag_add_dr_scan(2, fields, jtag_get_end_state());
767
768 return ERROR_OK;
769 }
770
771 static int xscale_update_vectors(struct target *target)
772 {
773 struct xscale_common *xscale = target_to_xscale(target);
774 int i;
775 int retval;
776
777 uint32_t low_reset_branch, high_reset_branch;
778
779 for (i = 1; i < 8; i++)
780 {
781 /* if there's a static vector specified for this exception, override */
782 if (xscale->static_high_vectors_set & (1 << i))
783 {
784 xscale->high_vectors[i] = xscale->static_high_vectors[i];
785 }
786 else
787 {
788 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
789 if (retval == ERROR_TARGET_TIMEOUT)
790 return retval;
791 if (retval != ERROR_OK)
792 {
793 /* Some of these reads will fail as part of normal execution */
794 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
795 }
796 }
797 }
798
799 for (i = 1; i < 8; i++)
800 {
801 if (xscale->static_low_vectors_set & (1 << i))
802 {
803 xscale->low_vectors[i] = xscale->static_low_vectors[i];
804 }
805 else
806 {
807 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
808 if (retval == ERROR_TARGET_TIMEOUT)
809 return retval;
810 if (retval != ERROR_OK)
811 {
812 /* Some of these reads will fail as part of normal execution */
813 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
814 }
815 }
816 }
817
818 /* calculate branches to debug handler */
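/* the reset vectors get an ARM B into the debug handler at handler_address + 0x20;
 * the B offset is relative to the vector address + 8 (ARM pipeline PC) and is
 * encoded in words, hence the - 0x8 and the >> 2 */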
819 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
820 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
821
822 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
823 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
824
825 /* invalidate and load exception vectors in mini i-cache */
826 xscale_invalidate_ic_line(target, 0x0);
827 xscale_invalidate_ic_line(target, 0xffff0000);
828
829 xscale_load_ic(target, 0x0, xscale->low_vectors);
830 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
831
832 return ERROR_OK;
833 }
834
835 static int xscale_arch_state(struct target *target)
836 {
837 struct xscale_common *xscale = target_to_xscale(target);
838 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
839
840 static const char *state[] =
841 {
842 "disabled", "enabled"
843 };
844
845 static const char *arch_dbg_reason[] =
846 {
847 "", "\n(processor reset)", "\n(trace buffer full)"
848 };
849
850 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
851 {
852 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
853 return ERROR_INVALID_ARGUMENTS;
854 }
855
856 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
857 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
858 "MMU: %s, D-Cache: %s, I-Cache: %s"
859 "%s",
860 armv4_5_state_strings[armv4_5->core_state],
861 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
862 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
863 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
864 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
865 state[xscale->armv4_5_mmu.mmu_enabled],
866 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
867 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
868 arch_dbg_reason[xscale->arch_debug_reason]);
869
870 return ERROR_OK;
871 }
872
873 static int xscale_poll(struct target *target)
874 {
875 int retval = ERROR_OK;
876
877 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
878 {
879 enum target_state previous_state = target->state;
880 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
881 {
882
883 /* there's data to read from the tx register, we entered debug state */
884 target->state = TARGET_HALTED;
885
886 /* process debug entry, fetching current mode regs */
887 retval = xscale_debug_entry(target);
888 }
889 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
890 {
891 LOG_USER("error while polling TX register, reset CPU");
892 /* here we "lie" so GDB won't get stuck and a reset can be perfomed */
893 target->state = TARGET_HALTED;
894 }
895
896 /* debug_entry could have overwritten target state (i.e. immediate resume)
897 * don't signal event handlers in that case
898 */
899 if (target->state != TARGET_HALTED)
900 return ERROR_OK;
901
902 /* if target was running, signal that we halted
903 * otherwise we reentered from debug execution */
904 if (previous_state == TARGET_RUNNING)
905 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
906 else
907 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
908 }
909
910 return retval;
911 }
912
913 static int xscale_debug_entry(struct target *target)
914 {
915 struct xscale_common *xscale = target_to_xscale(target);
916 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
917 uint32_t pc;
918 uint32_t buffer[10];
919 int i;
920 int retval;
921 uint32_t moe;
922
923 /* clear external dbg break (will be written on next DCSR read) */
924 xscale->external_debug_break = 0;
925 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
926 return retval;
927
928 /* get r0, pc, r1 to r7 and cpsr */
929 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
930 return retval;
931
932 /* move r0 from buffer to register cache */
933 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
934 armv4_5->core_cache->reg_list[0].dirty = 1;
935 armv4_5->core_cache->reg_list[0].valid = 1;
936 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
937
938 /* move pc from buffer to register cache */
939 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
940 armv4_5->core_cache->reg_list[15].dirty = 1;
941 armv4_5->core_cache->reg_list[15].valid = 1;
942 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
943
944 /* move data from buffer to register cache */
945 for (i = 1; i <= 7; i++)
946 {
947 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
948 armv4_5->core_cache->reg_list[i].dirty = 1;
949 armv4_5->core_cache->reg_list[i].valid = 1;
950 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
951 }
952
953 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
954 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
955 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
956 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
957
958 armv4_5->core_mode = buffer[9] & 0x1f;
959 if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
960 {
961 target->state = TARGET_UNKNOWN;
962 LOG_ERROR("cpsr contains invalid mode value - communication failure");
963 return ERROR_TARGET_FAILURE;
964 }
965 LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
966
967 if (buffer[9] & 0x20)
968 armv4_5->core_state = ARMV4_5_STATE_THUMB;
969 else
970 armv4_5->core_state = ARMV4_5_STATE_ARM;
971
972
973 if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
974 return ERROR_FAIL;
975
976 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
977 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
978 {
979 xscale_receive(target, buffer, 8);
980 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
981 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
982 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
983 }
984 else
985 {
986 /* r8 to r14, but no spsr */
987 xscale_receive(target, buffer, 7);
988 }
989
990 /* move data from buffer to register cache */
991 for (i = 8; i <= 14; i++)
992 {
993 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
994 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
995 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
996 }
997
998 /* examine debug reason */
999 xscale_read_dcsr(target);
1000 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
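/* MOE = DCSR[4:2], the 3-bit Method Of Entry code; the switch below maps it to
 * a debug reason */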
1001
1002 /* stored PC (for calculating fixup) */
1003 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1004
1005 switch (moe)
1006 {
1007 case 0x0: /* Processor reset */
1008 target->debug_reason = DBG_REASON_DBGRQ;
1009 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1010 pc -= 4;
1011 break;
1012 case 0x1: /* Instruction breakpoint hit */
1013 target->debug_reason = DBG_REASON_BREAKPOINT;
1014 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1015 pc -= 4;
1016 break;
1017 case 0x2: /* Data breakpoint hit */
1018 target->debug_reason = DBG_REASON_WATCHPOINT;
1019 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1020 pc -= 4;
1021 break;
1022 case 0x3: /* BKPT instruction executed */
1023 target->debug_reason = DBG_REASON_BREAKPOINT;
1024 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1025 pc -= 4;
1026 break;
1027 case 0x4: /* Ext. debug event */
1028 target->debug_reason = DBG_REASON_DBGRQ;
1029 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1030 pc -= 4;
1031 break;
1032 case 0x5: /* Vector trap occurred */
1033 target->debug_reason = DBG_REASON_BREAKPOINT;
1034 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1035 pc -= 4;
1036 break;
1037 case 0x6: /* Trace buffer full break */
1038 target->debug_reason = DBG_REASON_DBGRQ;
1039 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1040 pc -= 4;
1041 break;
1042 case 0x7: /* Reserved (may flag Hot-Debug support) */
1043 default:
1044 LOG_ERROR("Method of Entry is 'Reserved'");
1045 exit(-1);
1046 break;
1047 }
1048
1049 /* apply PC fixup */
1050 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1051
1052 /* on the first debug entry, identify cache type */
1053 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1054 {
1055 uint32_t cache_type_reg;
1056
1057 /* read cp15 cache type register */
1058 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1059 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1060
1061 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1062 }
1063
1064 /* examine MMU and Cache settings */
1065 /* read cp15 control register */
1066 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1067 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1068 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1069 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1070 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1071
1072 /* tracing enabled, read collected trace data */
1073 if (xscale->trace.buffer_enabled)
1074 {
1075 xscale_read_trace(target);
1076 xscale->trace.buffer_fill--;
1077
1078 /* resume if we're still collecting trace data */
1079 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1080 && (xscale->trace.buffer_fill > 0))
1081 {
1082 xscale_resume(target, 1, 0x0, 1, 0);
1083 }
1084 else
1085 {
1086 xscale->trace.buffer_enabled = 0;
1087 }
1088 }
1089
1090 return ERROR_OK;
1091 }
1092
1093 static int xscale_halt(struct target *target)
1094 {
1095 struct xscale_common *xscale = target_to_xscale(target);
1096
1097 LOG_DEBUG("target->state: %s",
1098 target_state_name(target));
1099
1100 if (target->state == TARGET_HALTED)
1101 {
1102 LOG_DEBUG("target was already halted");
1103 return ERROR_OK;
1104 }
1105 else if (target->state == TARGET_UNKNOWN)
1106 {
1107 /* this must not happen for an XScale target */
1108 LOG_ERROR("target was in unknown state when halt was requested");
1109 return ERROR_TARGET_INVALID;
1110 }
1111 else if (target->state == TARGET_RESET)
1112 {
1113 LOG_DEBUG("target->state == TARGET_RESET");
1114 }
1115 else
1116 {
1117 /* assert external dbg break */
1118 xscale->external_debug_break = 1;
1119 xscale_read_dcsr(target);
1120
1121 target->debug_reason = DBG_REASON_DBGRQ;
1122 }
1123
1124 return ERROR_OK;
1125 }
1126
1127 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1128 {
1129 struct xscale_common *xscale = target_to_xscale(target);
1130 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1131 int retval;
1132
1133 if (xscale->ibcr0_used)
1134 {
1135 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1136
1137 if (ibcr0_bp)
1138 {
1139 xscale_unset_breakpoint(target, ibcr0_bp);
1140 }
1141 else
1142 {
1143 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1144 exit(-1);
1145 }
1146 }
1147
1148 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1149 return retval;
1150
1151 return ERROR_OK;
1152 }
1153
1154 static int xscale_disable_single_step(struct target *target)
1155 {
1156 struct xscale_common *xscale = target_to_xscale(target);
1157 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1158 int retval;
1159
1160 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1161 return retval;
1162
1163 return ERROR_OK;
1164 }
1165
1166 static void xscale_enable_watchpoints(struct target *target)
1167 {
1168 struct watchpoint *watchpoint = target->watchpoints;
1169
1170 while (watchpoint)
1171 {
1172 if (watchpoint->set == 0)
1173 xscale_set_watchpoint(target, watchpoint);
1174 watchpoint = watchpoint->next;
1175 }
1176 }
1177
1178 static void xscale_enable_breakpoints(struct target *target)
1179 {
1180 struct breakpoint *breakpoint = target->breakpoints;
1181
1182 /* set any pending breakpoints */
1183 while (breakpoint)
1184 {
1185 if (breakpoint->set == 0)
1186 xscale_set_breakpoint(target, breakpoint);
1187 breakpoint = breakpoint->next;
1188 }
1189 }
1190
1191 static int xscale_resume(struct target *target, int current,
1192 uint32_t address, int handle_breakpoints, int debug_execution)
1193 {
1194 struct xscale_common *xscale = target_to_xscale(target);
1195 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
1196 struct breakpoint *breakpoint = target->breakpoints;
1197 uint32_t current_pc;
1198 int retval;
1199 int i;
1200
1201 LOG_DEBUG("-");
1202
1203 if (target->state != TARGET_HALTED)
1204 {
1205 LOG_WARNING("target not halted");
1206 return ERROR_TARGET_NOT_HALTED;
1207 }
1208
1209 if (!debug_execution)
1210 {
1211 target_free_all_working_areas(target);
1212 }
1213
1214 /* update vector tables */
1215 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1216 return retval;
1217
1218 /* current = 1: continue on current pc, otherwise continue at <address> */
1219 if (!current)
1220 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1221
1222 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1223
1224 /* if we're at the reset vector, we have to simulate the branch */
1225 if (current_pc == 0x0)
1226 {
1227 arm_simulate_step(target, NULL);
1228 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1229 }
1230
1231 /* the front-end may request us not to handle breakpoints */
1232 if (handle_breakpoints)
1233 {
1234 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1235 {
1236 uint32_t next_pc;
1237
1238 /* there's a breakpoint at the current PC, we have to step over it */
1239 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1240 xscale_unset_breakpoint(target, breakpoint);
1241
1242 /* calculate PC of next instruction */
1243 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1244 {
1245 uint32_t current_opcode;
1246 target_read_u32(target, current_pc, &current_opcode);
1247 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1248 }
1249
1250 LOG_DEBUG("enable single-step");
1251 xscale_enable_single_step(target, next_pc);
1252
1253 /* restore banked registers */
1254 xscale_restore_context(target);
1255
1256 /* send resume request (command 0x30 or 0x31)
1257 * clean the trace buffer if it is to be enabled (0x62) */
1258 if (xscale->trace.buffer_enabled)
1259 {
1260 xscale_send_u32(target, 0x62);
1261 xscale_send_u32(target, 0x31);
1262 }
1263 else
1264 xscale_send_u32(target, 0x30);
1265
1266 /* send CPSR */
1267 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1268 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1269
1270 for (i = 7; i >= 0; i--)
1271 {
1272 /* send register */
1273 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1274 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1275 }
1276
1277 /* send PC */
1278 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1279 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1280
1281 /* wait for and process debug entry */
1282 xscale_debug_entry(target);
1283
1284 LOG_DEBUG("disable single-step");
1285 xscale_disable_single_step(target);
1286
1287 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1288 xscale_set_breakpoint(target, breakpoint);
1289 }
1290 }
1291
1292 /* enable any pending breakpoints and watchpoints */
1293 xscale_enable_breakpoints(target);
1294 xscale_enable_watchpoints(target);
1295
1296 /* restore banked registers */
1297 xscale_restore_context(target);
1298
1299 /* send resume request (command 0x30 or 0x31)
1300 * clean the trace buffer if it is to be enabled (0x62) */
1301 if (xscale->trace.buffer_enabled)
1302 {
1303 xscale_send_u32(target, 0x62);
1304 xscale_send_u32(target, 0x31);
1305 }
1306 else
1307 xscale_send_u32(target, 0x30);
1308
1309 /* send CPSR */
1310 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1311 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1312
1313 for (i = 7; i >= 0; i--)
1314 {
1315 /* send register */
1316 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1317 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1318 }
1319
1320 /* send PC */
1321 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1322 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1323
1324 target->debug_reason = DBG_REASON_NOTHALTED;
1325
1326 if (!debug_execution)
1327 {
1328 /* registers are now invalid */
1329 armv4_5_invalidate_core_regs(target);
1330 target->state = TARGET_RUNNING;
1331 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1332 }
1333 else
1334 {
1335 target->state = TARGET_DEBUG_RUNNING;
1336 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1337 }
1338
1339 LOG_DEBUG("target resumed");
1340
1341 return ERROR_OK;
1342 }
1343
1344 static int xscale_step_inner(struct target *target, int current,
1345 uint32_t address, int handle_breakpoints)
1346 {
1347 struct xscale_common *xscale = target_to_xscale(target);
1348 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
1349 uint32_t next_pc;
1350 int retval;
1351 int i;
1352
1353 target->debug_reason = DBG_REASON_SINGLESTEP;
1354
1355 /* calculate PC of next instruction */
1356 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1357 {
1358 uint32_t current_opcode, current_pc;
1359 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1360
1361 target_read_u32(target, current_pc, &current_opcode);
1362 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1363 return retval;
1364 }
1365
1366 LOG_DEBUG("enable single-step");
1367 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1368 return retval;
1369
1370 /* restore banked registers */
1371 if ((retval = xscale_restore_context(target)) != ERROR_OK)
1372 return retval;
1373
1374 /* send resume request (command 0x30 or 0x31)
1375 * clean the trace buffer if it is to be enabled (0x62) */
1376 if (xscale->trace.buffer_enabled)
1377 {
1378 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1379 return retval;
1380 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1381 return retval;
1382 }
1383 else
1384 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1385 return retval;
1386
1387 /* send CPSR */
1388 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32))) != ERROR_OK)
1389 return retval;
1390 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1391
1392 for (i = 7; i >= 0; i--)
1393 {
1394 /* send register */
1395 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1396 return retval;
1397 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1398 }
1399
1400 /* send PC */
1401 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
1402 return retval;
1403 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1404
1405 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1406
1407 /* registers are now invalid */
1408 if ((retval = armv4_5_invalidate_core_regs(target)) != ERROR_OK)
1409 return retval;
1410
1411 /* wait for and process debug entry */
1412 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1413 return retval;
1414
1415 LOG_DEBUG("disable single-step");
1416 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1417 return retval;
1418
1419 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1420
1421 return ERROR_OK;
1422 }
1423
1424 static int xscale_step(struct target *target, int current,
1425 uint32_t address, int handle_breakpoints)
1426 {
1427 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1428 struct breakpoint *breakpoint = target->breakpoints;
1429
1430 uint32_t current_pc;
1431 int retval;
1432
1433 if (target->state != TARGET_HALTED)
1434 {
1435 LOG_WARNING("target not halted");
1436 return ERROR_TARGET_NOT_HALTED;
1437 }
1438
1439 /* current = 1: continue on current pc, otherwise continue at <address> */
1440 if (!current)
1441 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1442
1443 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1444
1445 /* if we're at the reset vector, we have to simulate the step */
1446 if (current_pc == 0x0)
1447 {
1448 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1449 return retval;
1450 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1451
1452 target->debug_reason = DBG_REASON_SINGLESTEP;
1453 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1454
1455 return ERROR_OK;
1456 }
1457
1458 /* the front-end may request us not to handle breakpoints */
1459 if (handle_breakpoints)
1460 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1461 {
1462 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1463 return retval;
1464 }
1465
1466 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1467
1468 if (breakpoint)
1469 {
1470 xscale_set_breakpoint(target, breakpoint);
1471 }
1472
1473 LOG_DEBUG("target stepped");
1474
1475 return ERROR_OK;
1476
1477 }
1478
1479 static int xscale_assert_reset(struct target *target)
1480 {
1481 struct xscale_common *xscale = target_to_xscale(target);
1482
1483 LOG_DEBUG("target->state: %s",
1484 target_state_name(target));
1485
1486 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1487 * end up in T-L-R, which would reset JTAG)
1488 */
1489 jtag_set_end_state(TAP_IDLE);
1490 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
1491
1492 /* set Hold reset, Halt mode and Trap Reset */
1493 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1494 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1495 xscale_write_dcsr(target, 1, 0);
1496
1497 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1498 xscale_jtag_set_instr(target->tap, 0x7f);
1499 jtag_execute_queue();
1500
1501 /* assert reset */
1502 jtag_add_reset(0, 1);
1503
1504 /* sleep 1ms, to be sure we fulfill any requirements */
1505 jtag_add_sleep(1000);
1506 jtag_execute_queue();
1507
1508 target->state = TARGET_RESET;
1509
1510 if (target->reset_halt)
1511 {
1512 int retval;
1513 if ((retval = target_halt(target)) != ERROR_OK)
1514 return retval;
1515 }
1516
1517 return ERROR_OK;
1518 }
1519
1520 static int xscale_deassert_reset(struct target *target)
1521 {
1522 struct xscale_common *xscale = target_to_xscale(target);
1523 struct breakpoint *breakpoint = target->breakpoints;
1524
1525 LOG_DEBUG("-");
1526
1527 xscale->ibcr_available = 2;
1528 xscale->ibcr0_used = 0;
1529 xscale->ibcr1_used = 0;
1530
1531 xscale->dbr_available = 2;
1532 xscale->dbr0_used = 0;
1533 xscale->dbr1_used = 0;
1534
1535 /* mark all hardware breakpoints as unset */
1536 while (breakpoint)
1537 {
1538 if (breakpoint->type == BKPT_HARD)
1539 {
1540 breakpoint->set = 0;
1541 }
1542 breakpoint = breakpoint->next;
1543 }
1544
1545 armv4_5_invalidate_core_regs(target);
1546
1547 /* FIXME mark hardware watchpoints as unset too. Also,
1548 * at least some of the XScale registers are invalid...
1549 */
1550
1551 /*
1552 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1553 * contents got invalidated. Safer to force that, so writing new
1554 * contents can't ever fail..
1555 */
1556 {
1557 uint32_t address;
1558 unsigned buf_cnt;
1559 const uint8_t *buffer = xscale_debug_handler;
1560 int retval;
1561
1562 /* release SRST */
1563 jtag_add_reset(0, 0);
1564
1565 /* wait 300ms; 150 and 100ms were not enough */
1566 jtag_add_sleep(300*1000);
1567
1568 jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
1569 jtag_execute_queue();
1570
1571 /* set Hold reset, Halt mode and Trap Reset */
1572 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1573 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1574 xscale_write_dcsr(target, 1, 0);
1575
1576 /* Load the debug handler into the mini-icache. Since
1577 * it's using halt mode (not monitor mode), it runs in
1578 * "Special Debug State" for access to registers, memory,
1579 * coprocessors, trace data, etc.
1580 */
1581 address = xscale->handler_address;
1582 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1583 binary_size > 0;
1584 binary_size -= buf_cnt, buffer += buf_cnt)
1585 {
1586 uint32_t cache_line[8];
1587 unsigned i;
1588
1589 buf_cnt = binary_size;
1590 if (buf_cnt > 32)
1591 buf_cnt = 32;
1592
1593 for (i = 0; i < buf_cnt; i += 4)
1594 {
1595 /* convert LE buffer to host-endian uint32_t */
1596 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1597 }
1598
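/* pad the rest of the cache line with 0xe1a08008 ("mov r8, r8", an ARM NOP) */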
1599 for (; i < 32; i += 4)
1600 {
1601 cache_line[i / 4] = 0xe1a08008;
1602 }
1603
1604 /* only load addresses other than the reset vectors */
1605 if ((address % 0x400) != 0x0)
1606 {
1607 retval = xscale_load_ic(target, address,
1608 cache_line);
1609 if (retval != ERROR_OK)
1610 return retval;
1611 }
1612
1613 address += buf_cnt;
1614 }
1615
1616 retval = xscale_load_ic(target, 0x0,
1617 xscale->low_vectors);
1618 if (retval != ERROR_OK)
1619 return retval;
1620 retval = xscale_load_ic(target, 0xffff0000,
1621 xscale->high_vectors);
1622 if (retval != ERROR_OK)
1623 return retval;
1624
1625 jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
1626
1627 jtag_add_sleep(100000);
1628
1629 /* set Hold reset, Halt mode and Trap Reset */
1630 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1631 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1632 xscale_write_dcsr(target, 1, 0);
1633
1634 /* clear Hold reset to let the target run (should enter debug handler) */
1635 xscale_write_dcsr(target, 0, 1);
1636 target->state = TARGET_RUNNING;
1637
1638 if (!target->reset_halt)
1639 {
1640 jtag_add_sleep(10000);
1641
1642 /* we should have entered debug now */
1643 xscale_debug_entry(target);
1644 target->state = TARGET_HALTED;
1645
1646 /* resume the target */
1647 xscale_resume(target, 1, 0x0, 1, 0);
1648 }
1649 }
1650
1651 return ERROR_OK;
1652 }
1653
1654 static int xscale_read_core_reg(struct target *target, int num,
1655 enum armv4_5_mode mode)
1656 {
1657 LOG_ERROR("not implemented");
1658 return ERROR_OK;
1659 }
1660
1661 static int xscale_write_core_reg(struct target *target, int num,
1662 enum armv4_5_mode mode, uint32_t value)
1663 {
1664 LOG_ERROR("not implemented");
1665 return ERROR_OK;
1666 }
1667
1668 static int xscale_full_context(struct target *target)
1669 {
1670 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1671
1672 uint32_t *buffer;
1673
1674 int i, j;
1675
1676 LOG_DEBUG("-");
1677
1678 if (target->state != TARGET_HALTED)
1679 {
1680 LOG_WARNING("target not halted");
1681 return ERROR_TARGET_NOT_HALTED;
1682 }
1683
1684 buffer = malloc(4 * 8);
1685
1686 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1687 * we can't enter User mode on an XScale (unpredictable),
1688 * but User shares registers with SYS
1689 */
1690 for (i = 1; i < 7; i++)
1691 {
1692 int valid = 1;
1693
1694 /* check if there are invalid registers in the current mode
1695 */
1696 for (j = 0; j <= 16; j++)
1697 {
1698 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1699 valid = 0;
1700 }
1701
1702 if (!valid)
1703 {
1704 uint32_t tmp_cpsr;
1705
1706 /* request banked registers */
1707 xscale_send_u32(target, 0x0);
1708
1709 tmp_cpsr = 0x0;
1710 tmp_cpsr |= armv4_5_number_to_mode(i);
1711 tmp_cpsr |= 0xc0; /* I/F bits */
1712
1713 /* send CPSR for desired mode */
1714 xscale_send_u32(target, tmp_cpsr);
1715
1716 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1717 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1718 {
1719 xscale_receive(target, buffer, 8);
1720 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1721 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1722 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1723 }
1724 else
1725 {
1726 xscale_receive(target, buffer, 7);
1727 }
1728
1729 /* move data from buffer to register cache */
1730 for (j = 8; j <= 14; j++)
1731 {
1732 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1733 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1734 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1735 }
1736 }
1737 }
1738
1739 free(buffer);
1740
1741 return ERROR_OK;
1742 }
1743
1744 static int xscale_restore_context(struct target *target)
1745 {
1746 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1747
1748 int i, j;
1749
1750 if (target->state != TARGET_HALTED)
1751 {
1752 LOG_WARNING("target not halted");
1753 return ERROR_TARGET_NOT_HALTED;
1754 }
1755
1756 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1757 * we can't enter User mode on an XScale (unpredictable),
1758 * but User shares registers with SYS
1759 */
1760 for (i = 1; i < 7; i++)
1761 {
1762 int dirty = 0;
1763
1764 /* check if there are dirty registers in the current mode
1765 */
1766 for (j = 8; j <= 14; j++)
1767 {
1768 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1769 dirty = 1;
1770 }
1771
1772 /* if not USR/SYS, check if the SPSR needs to be written */
1773 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1774 {
1775 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1776 dirty = 1;
1777 }
1778
1779 if (dirty)
1780 {
1781 uint32_t tmp_cpsr;
1782
1783 /* send banked registers */
1784 xscale_send_u32(target, 0x1);
1785
1786 tmp_cpsr = 0x0;
1787 tmp_cpsr |= armv4_5_number_to_mode(i);
1788 tmp_cpsr |= 0xc0; /* I/F bits */
1789
1790 /* send CPSR for desired mode */
1791 xscale_send_u32(target, tmp_cpsr);
1792
1793 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1794 for (j = 8; j <= 14; j++)
1795 {
1796 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, j).value, 0, 32));
1797 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1798 }
1799
1800 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1801 {
1802 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32));
1803 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1804 }
1805 }
1806 }
1807
1808 return ERROR_OK;
1809 }
1810
1811 static int xscale_read_memory(struct target *target, uint32_t address,
1812 uint32_t size, uint32_t count, uint8_t *buffer)
1813 {
1814 struct xscale_common *xscale = target_to_xscale(target);
1815 uint32_t *buf32;
1816 uint32_t i;
1817 int retval;
1818
1819 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1820
1821 if (target->state != TARGET_HALTED)
1822 {
1823 LOG_WARNING("target not halted");
1824 return ERROR_TARGET_NOT_HALTED;
1825 }
1826
1827 /* sanitize arguments */
1828 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1829 return ERROR_INVALID_ARGUMENTS;
1830
1831 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1832 return ERROR_TARGET_UNALIGNED_ACCESS;
1833
1834 /* send memory read request (command 0x1n, n: access size) */
1835 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1836 return retval;
1837
1838 /* send base address for read request */
1839 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1840 return retval;
1841
1842 /* send number of requested data words */
1843 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1844 return retval;
1845
1846 /* receive data from target (count times 32-bit words in host endianness) */
1847 buf32 = malloc(4 * count);
1848 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1849 return retval;
1850
1851 /* extract data from host-endian buffer into byte stream */
1852 for (i = 0; i < count; i++)
1853 {
1854 switch (size)
1855 {
1856 case 4:
1857 target_buffer_set_u32(target, buffer, buf32[i]);
1858 buffer += 4;
1859 break;
1860 case 2:
1861 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1862 buffer += 2;
1863 break;
1864 case 1:
1865 *buffer++ = buf32[i] & 0xff;
1866 break;
1867 default:
1868 LOG_ERROR("invalid read size");
1869 return ERROR_INVALID_ARGUMENTS;
1870 }
1871 }
1872
1873 free(buf32);
1874
1875 /* examine DCSR, to see if Sticky Abort (SA) got set */
1876 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1877 return retval;
1878 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1879 {
1880 /* clear SA bit */
1881 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1882 return retval;
1883
1884 return ERROR_TARGET_DATA_ABORT;
1885 }
1886
1887 return ERROR_OK;
1888 }
1889
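/* Write target memory through the debug handler: command 0x20|size, base
 * address and word count, followed by the raw data via xscale_send(); as
 * with reads, DCSR's Sticky Abort bit is checked and cleared (command 0x60)
 * if the access faulted.
 */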
1890 static int xscale_write_memory(struct target *target, uint32_t address,
1891 uint32_t size, uint32_t count, uint8_t *buffer)
1892 {
1893 struct xscale_common *xscale = target_to_xscale(target);
1894 int retval;
1895
1896 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1897
1898 if (target->state != TARGET_HALTED)
1899 {
1900 LOG_WARNING("target not halted");
1901 return ERROR_TARGET_NOT_HALTED;
1902 }
1903
1904 /* sanitize arguments */
1905 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1906 return ERROR_INVALID_ARGUMENTS;
1907
1908 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1909 return ERROR_TARGET_UNALIGNED_ACCESS;
1910
1911 /* send memory write request (command 0x2n, n: access size) */
1912 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1913 return retval;
1914
1915 /* send base address for write request */
1916 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1917 return retval;
1918
1919 /* send number of requested data words to be written */
1920 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1921 return retval;
1922
1923 /* assemble words from the byte stream and send them one at a time (disabled; superseded by the block transfer below) */
1924 #if 0
1925 for (i = 0; i < count; i++)
1926 {
1927 switch (size)
1928 {
1929 case 4:
1930 value = target_buffer_get_u32(target, buffer);
1931 xscale_send_u32(target, value);
1932 buffer += 4;
1933 break;
1934 case 2:
1935 value = target_buffer_get_u16(target, buffer);
1936 xscale_send_u32(target, value);
1937 buffer += 2;
1938 break;
1939 case 1:
1940 value = *buffer;
1941 xscale_send_u32(target, value);
1942 buffer += 1;
1943 break;
1944 default:
1945 LOG_ERROR("should never get here");
1946 exit(-1);
1947 }
1948 }
1949 #endif
1950 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1951 return retval;
1952
1953 /* examine DCSR, to see if Sticky Abort (SA) got set */
1954 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1955 return retval;
1956 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1957 {
1958 /* clear SA bit */
1959 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1960 return retval;
1961
1962 return ERROR_TARGET_DATA_ABORT;
1963 }
1964
1965 return ERROR_OK;
1966 }
1967
1968 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
1969 uint32_t count, uint8_t *buffer)
1970 {
1971 return xscale_write_memory(target, address, 4, count, buffer);
1972 }
1973
1974 static uint32_t xscale_get_ttb(struct target *target)
1975 {
1976 struct xscale_common *xscale = target_to_xscale(target);
1977 uint32_t ttb;
1978
1979 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1980 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1981
1982 return ttb;
1983 }
1984
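/* Disable the MMU and/or caches by clearing the corresponding cp15 control
 * bits (bit 0: MMU, bit 2: data/unified cache, bit 12: instruction cache, as
 * manipulated below). The data cache is cleaned (command 0x50, using the
 * configured cache_clean_address) and invalidated (0x51) first, and command
 * 0x53 issues a CPWAIT so the cp15 update takes effect before returning.
 */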
1985 static void xscale_disable_mmu_caches(struct target *target, int mmu,
1986 int d_u_cache, int i_cache)
1987 {
1988 struct xscale_common *xscale = target_to_xscale(target);
1989 uint32_t cp15_control;
1990
1991 /* read cp15 control register */
1992 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1993 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1994
1995 if (mmu)
1996 cp15_control &= ~0x1U;
1997
1998 if (d_u_cache)
1999 {
2000 /* clean DCache */
2001 xscale_send_u32(target, 0x50);
2002 xscale_send_u32(target, xscale->cache_clean_address);
2003
2004 /* invalidate DCache */
2005 xscale_send_u32(target, 0x51);
2006
2007 cp15_control &= ~0x4U;
2008 }
2009
2010 if (i_cache)
2011 {
2012 /* invalidate ICache */
2013 xscale_send_u32(target, 0x52);
2014 cp15_control &= ~0x1000U;
2015 }
2016
2017 /* write new cp15 control register */
2018 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2019
2020 /* execute cpwait to ensure outstanding operations complete */
2021 xscale_send_u32(target, 0x53);
2022 }
2023
2024 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2025 int d_u_cache, int i_cache)
2026 {
2027 struct xscale_common *xscale = target_to_xscale(target);
2028 uint32_t cp15_control;
2029
2030 /* read cp15 control register */
2031 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2032 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2033
2034 if (mmu)
2035 cp15_control |= 0x1U;
2036
2037 if (d_u_cache)
2038 cp15_control |= 0x4U;
2039
2040 if (i_cache)
2041 cp15_control |= 0x1000U;
2042
2043 /* write new cp15 control register */
2044 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2045
2046 /* execute cpwait to ensure outstanding operations complete */
2047 xscale_send_u32(target, 0x53);
2048 }
2049
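/* Install a breakpoint previously registered with xscale_add_breakpoint().
 * Hardware breakpoints occupy one of the two instruction breakpoint
 * registers (IBCR0/IBCR1), with bit 0 used as the enable bit; software
 * breakpoints save the original opcode and overwrite it with the ARM or
 * Thumb BKPT instruction.
 */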
2050 static int xscale_set_breakpoint(struct target *target,
2051 struct breakpoint *breakpoint)
2052 {
2053 int retval;
2054 struct xscale_common *xscale = target_to_xscale(target);
2055
2056 if (target->state != TARGET_HALTED)
2057 {
2058 LOG_WARNING("target not halted");
2059 return ERROR_TARGET_NOT_HALTED;
2060 }
2061
2062 if (breakpoint->set)
2063 {
2064 LOG_WARNING("breakpoint already set");
2065 return ERROR_OK;
2066 }
2067
2068 if (breakpoint->type == BKPT_HARD)
2069 {
2070 uint32_t value = breakpoint->address | 1;
2071 if (!xscale->ibcr0_used)
2072 {
2073 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2074 xscale->ibcr0_used = 1;
2075 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2076 }
2077 else if (!xscale->ibcr1_used)
2078 {
2079 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2080 xscale->ibcr1_used = 1;
2081 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2082 }
2083 else
2084 {
2085 LOG_ERROR("BUG: no hardware comparator available");
2086 return ERROR_OK;
2087 }
2088 }
2089 else if (breakpoint->type == BKPT_SOFT)
2090 {
2091 if (breakpoint->length == 4)
2092 {
2093 /* keep the original instruction in target endianness */
2094 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2095 {
2096 return retval;
2097 }
2098 /* write the bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
2099 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2100 {
2101 return retval;
2102 }
2103 }
2104 else
2105 {
2106 /* keep the original instruction in target endianness */
2107 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2108 {
2109 return retval;
2110 }
2111 /* write the bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
2112 if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2113 {
2114 return retval;
2115 }
2116 }
2117 breakpoint->set = 1;
2118 }
2119
2120 return ERROR_OK;
2121 }
2122
2123 static int xscale_add_breakpoint(struct target *target,
2124 struct breakpoint *breakpoint)
2125 {
2126 struct xscale_common *xscale = target_to_xscale(target);
2127
2128 if (target->state != TARGET_HALTED)
2129 {
2130 LOG_WARNING("target not halted");
2131 return ERROR_TARGET_NOT_HALTED;
2132 }
2133
2134 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2135 {
2136 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2137 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2138 }
2139
2140 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2141 {
2142 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2143 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2144 }
2145
2146 if (breakpoint->type == BKPT_HARD)
2147 {
2148 xscale->ibcr_available--;
2149 }
2150
2151 return ERROR_OK;
2152 }
2153
2154 static int xscale_unset_breakpoint(struct target *target,
2155 struct breakpoint *breakpoint)
2156 {
2157 int retval;
2158 struct xscale_common *xscale = target_to_xscale(target);
2159
2160 if (target->state != TARGET_HALTED)
2161 {
2162 LOG_WARNING("target not halted");
2163 return ERROR_TARGET_NOT_HALTED;
2164 }
2165
2166 if (!breakpoint->set)
2167 {
2168 LOG_WARNING("breakpoint not set");
2169 return ERROR_OK;
2170 }
2171
2172 if (breakpoint->type == BKPT_HARD)
2173 {
2174 if (breakpoint->set == 1)
2175 {
2176 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2177 xscale->ibcr0_used = 0;
2178 }
2179 else if (breakpoint->set == 2)
2180 {
2181 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2182 xscale->ibcr1_used = 0;
2183 }
2184 breakpoint->set = 0;
2185 }
2186 else
2187 {
2188 /* restore original instruction (kept in target endianness) */
2189 if (breakpoint->length == 4)
2190 {
2191 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2192 {
2193 return retval;
2194 }
2195 }
2196 else
2197 {
2198 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2199 {
2200 return retval;
2201 }
2202 }
2203 breakpoint->set = 0;
2204 }
2205
2206 return ERROR_OK;
2207 }
2208
2209 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2210 {
2211 struct xscale_common *xscale = target_to_xscale(target);
2212
2213 if (target->state != TARGET_HALTED)
2214 {
2215 LOG_WARNING("target not halted");
2216 return ERROR_TARGET_NOT_HALTED;
2217 }
2218
2219 if (breakpoint->set)
2220 {
2221 xscale_unset_breakpoint(target, breakpoint);
2222 }
2223
2224 if (breakpoint->type == BKPT_HARD)
2225 xscale->ibcr_available++;
2226
2227 return ERROR_OK;
2228 }
2229
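/* Program a data watchpoint: the address goes into DBR0 or DBR1, and the
 * matching two-bit enable field in DBCON selects the access type to match
 * (0x1 write, 0x2 any access, 0x3 read, per the switch below).
 */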
2230 static int xscale_set_watchpoint(struct target *target,
2231 struct watchpoint *watchpoint)
2232 {
2233 struct xscale_common *xscale = target_to_xscale(target);
2234 uint8_t enable = 0;
2235 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2236 uint32_t dbcon_value;
2237
2238 if (target->state != TARGET_HALTED)
2239 {
2240 LOG_WARNING("target not halted");
2241 return ERROR_TARGET_NOT_HALTED;
2242 }
2243
2244 xscale_get_reg(dbcon);
dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2245
2246 switch (watchpoint->rw)
2247 {
2248 case WPT_READ:
2249 enable = 0x3;
2250 break;
2251 case WPT_ACCESS:
2252 enable = 0x2;
2253 break;
2254 case WPT_WRITE:
2255 enable = 0x1;
2256 break;
2257 default:
2258 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2259 }
2260
2261 if (!xscale->dbr0_used)
2262 {
2263 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2264 dbcon_value |= enable;
2265 xscale_set_reg_u32(dbcon, dbcon_value);
2266 watchpoint->set = 1;
2267 xscale->dbr0_used = 1;
2268 }
2269 else if (!xscale->dbr1_used)
2270 {
2271 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2272 dbcon_value |= enable << 2;
2273 xscale_set_reg_u32(dbcon, dbcon_value);
2274 watchpoint->set = 2;
2275 xscale->dbr1_used = 1;
2276 }
2277 else
2278 {
2279 LOG_ERROR("BUG: no hardware comparator available");
2280 return ERROR_OK;
2281 }
2282
2283 return ERROR_OK;
2284 }
2285
2286 static int xscale_add_watchpoint(struct target *target,
2287 struct watchpoint *watchpoint)
2288 {
2289 struct xscale_common *xscale = target_to_xscale(target);
2290
2291 if (target->state != TARGET_HALTED)
2292 {
2293 LOG_WARNING("target not halted");
2294 return ERROR_TARGET_NOT_HALTED;
2295 }
2296
2297 if (xscale->dbr_available < 1)
2298 {
2299 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2300 }
2301
2302 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2303 {
2304 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2305 }
2306
2307 xscale->dbr_available--;
2308
2309 return ERROR_OK;
2310 }
2311
2312 static int xscale_unset_watchpoint(struct target *target,
2313 struct watchpoint *watchpoint)
2314 {
2315 struct xscale_common *xscale = target_to_xscale(target);
2316 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2317 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2318
2319 if (target->state != TARGET_HALTED)
2320 {
2321 LOG_WARNING("target not halted");
2322 return ERROR_TARGET_NOT_HALTED;
2323 }
2324
2325 if (!watchpoint->set)
2326 {
2327 LOG_WARNING("watchpoint not set");
2328 return ERROR_OK;
2329 }
2330
2331 if (watchpoint->set == 1)
2332 {
2333 dbcon_value &= ~0x3;
2334 xscale_set_reg_u32(dbcon, dbcon_value);
2335 xscale->dbr0_used = 0;
2336 }
2337 else if (watchpoint->set == 2)
2338 {
2339 dbcon_value &= ~0xc;
2340 xscale_set_reg_u32(dbcon, dbcon_value);
2341 xscale->dbr1_used = 0;
2342 }
2343 watchpoint->set = 0;
2344
2345 return ERROR_OK;
2346 }
2347
2348 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2349 {
2350 struct xscale_common *xscale = target_to_xscale(target);
2351
2352 if (target->state != TARGET_HALTED)
2353 {
2354 LOG_WARNING("target not halted");
2355 return ERROR_TARGET_NOT_HALTED;
2356 }
2357
2358 if (watchpoint->set)
2359 {
2360 xscale_unset_watchpoint(target, watchpoint);
2361 }
2362
2363 xscale->dbr_available++;
2364
2365 return ERROR_OK;
2366 }
2367
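/* Register-cache read handler for the XScale debug registers. DCSR, TX and
 * RX are reachable directly over JTAG; all other debug registers are fetched
 * through the debug handler (command 0x40 plus the register's handler
 * number), with the value returned via TX.
 */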
2368 static int xscale_get_reg(struct reg *reg)
2369 {
2370 struct xscale_reg *arch_info = reg->arch_info;
2371 struct target *target = arch_info->target;
2372 struct xscale_common *xscale = target_to_xscale(target);
2373
2374 /* DCSR, TX and RX are accessible via JTAG */
2375 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2376 {
2377 return xscale_read_dcsr(arch_info->target);
2378 }
2379 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2380 {
2381 /* 1 = consume register content */
2382 return xscale_read_tx(arch_info->target, 1);
2383 }
2384 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2385 {
2386 /* can't read from RX register (host -> debug handler) */
2387 return ERROR_OK;
2388 }
2389 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2390 {
2391 /* can't (explicitly) read from TXRXCTRL register */
2392 return ERROR_OK;
2393 }
2394 else /* Other DBG registers have to be transferred by the debug handler */
2395 {
2396 /* send CP read request (command 0x40) */
2397 xscale_send_u32(target, 0x40);
2398
2399 /* send CP register number */
2400 xscale_send_u32(target, arch_info->dbg_handler_number);
2401
2402 /* read register value */
2403 xscale_read_tx(target, 1);
2404 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2405
2406 reg->dirty = 0;
2407 reg->valid = 1;
2408 }
2409
2410 return ERROR_OK;
2411 }
2412
2413 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2414 {
2415 struct xscale_reg *arch_info = reg->arch_info;
2416 struct target *target = arch_info->target;
2417 struct xscale_common *xscale = target_to_xscale(target);
2418 uint32_t value = buf_get_u32(buf, 0, 32);
2419
2420 /* DCSR, TX and RX are accessible via JTAG */
2421 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2422 {
2423 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2424 return xscale_write_dcsr(arch_info->target, -1, -1);
2425 }
2426 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2427 {
2428 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2429 return xscale_write_rx(arch_info->target);
2430 }
2431 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2432 {
2433 /* can't write to TX register (debug-handler -> host) */
2434 return ERROR_OK;
2435 }
2436 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2437 {
2438 /* can't (explicitly) write to TXRXCTRL register */
2439 return ERROR_OK;
2440 }
2441 else /* Other DBG registers have to be transferred by the debug handler */
2442 {
2443 /* send CP write request (command 0x41) */
2444 xscale_send_u32(target, 0x41);
2445
2446 /* send CP register number */
2447 xscale_send_u32(target, arch_info->dbg_handler_number);
2448
2449 /* send CP register value */
2450 xscale_send_u32(target, value);
2451 buf_set_u32(reg->value, 0, 32, value);
2452 }
2453
2454 return ERROR_OK;
2455 }
2456
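/* Update DCSR by way of the debug handler (CP write request 0x41) rather
 * than directly over JTAG, keeping the cached register value in sync with
 * what was sent.
 */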
2457 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2458 {
2459 struct xscale_common *xscale = target_to_xscale(target);
2460 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2461 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2462
2463 /* send CP write request (command 0x41) */
2464 xscale_send_u32(target, 0x41);
2465
2466 /* send CP register number */
2467 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2468
2469 /* send CP register value */
2470 xscale_send_u32(target, value);
2471 buf_set_u32(dcsr->value, 0, 32, value);
2472
2473 return ERROR_OK;
2474 }
2475
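/* Drain the trace buffer: command 0x61 makes the debug handler return 258
 * words (256 trace entries plus the two checkpoint registers). The buffer is
 * scanned backwards so the four bytes preceding each indirect-branch message
 * can be flagged as address bytes rather than trace messages; everything
 * from the first non-empty entry onward is then appended to the per-target
 * trace data list.
 */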
2476 static int xscale_read_trace(struct target *target)
2477 {
2478 struct xscale_common *xscale = target_to_xscale(target);
2479 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2480 struct xscale_trace_data **trace_data_p;
2481
2482 /* 258 words from debug handler
2483 * 256 trace buffer entries
2484 * 2 checkpoint addresses
2485 */
2486 uint32_t trace_buffer[258];
2487 int is_address[256];
2488 int i, j;
2489
2490 if (target->state != TARGET_HALTED)
2491 {
2492 LOG_WARNING("target must be stopped to read trace data");
2493 return ERROR_TARGET_NOT_HALTED;
2494 }
2495
2496 /* send read trace buffer command (command 0x61) */
2497 xscale_send_u32(target, 0x61);
2498
2499 /* receive trace buffer content */
2500 xscale_receive(target, trace_buffer, 258);
2501
2502 /* parse buffer backwards to identify address entries */
2503 for (i = 255; i >= 0; i--)
2504 {
2505 is_address[i] = 0;
2506 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2507 ((trace_buffer[i] & 0xf0) == 0xd0))
2508 {
2509 if (i > 0)
2510 is_address[--i] = 1;
2511 if (i > 0)
2512 is_address[--i] = 1;
2513 if (i > 0)
2514 is_address[--i] = 1;
2515 if (i > 0)
2516 is_address[--i] = 1;
2517 }
2518 }
2519
2520
2521 /* search first non-zero entry */
2522 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2523 ;
2524
2525 if (j == 256)
2526 {
2527 LOG_DEBUG("no trace data collected");
2528 return ERROR_XSCALE_NO_TRACE_DATA;
2529 }
2530
2531 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2532 ;
2533
2534 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2535 (*trace_data_p)->next = NULL;
2536 (*trace_data_p)->chkpt0 = trace_buffer[256];
2537 (*trace_data_p)->chkpt1 = trace_buffer[257];
2538 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2539 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2540 (*trace_data_p)->depth = 256 - j;
2541
2542 for (i = j; i < 256; i++)
2543 {
2544 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2545 if (is_address[i])
2546 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2547 else
2548 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2549 }
2550
2551 return ERROR_OK;
2552 }
2553
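/* Fetch and disassemble the instruction at the current trace PC from the
 * image loaded for trace analysis, using the traced core state to pick
 * 32-bit ARM or 16-bit Thumb decoding.
 */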
2554 static int xscale_read_instruction(struct target *target,
2555 struct arm_instruction *instruction)
2556 {
2557 struct xscale_common *xscale = target_to_xscale(target);
2558 int i;
2559 int section = -1;
2560 size_t size_read;
2561 uint32_t opcode;
2562 int retval;
2563
2564 if (!xscale->trace.image)
2565 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2566
2567 /* search for the section the current instruction belongs to */
2568 for (i = 0; i < xscale->trace.image->num_sections; i++)
2569 {
2570 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2571 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2572 {
2573 section = i;
2574 break;
2575 }
2576 }
2577
2578 if (section == -1)
2579 {
2580 /* current instruction couldn't be found in the image */
2581 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2582 }
2583
2584 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2585 {
2586 uint8_t buf[4];
2587 if ((retval = image_read_section(xscale->trace.image, section,
2588 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2589 4, buf, &size_read)) != ERROR_OK)
2590 {
2591 LOG_ERROR("error while reading instruction: %i", retval);
2592 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2593 }
2594 opcode = target_buffer_get_u32(target, buf);
2595 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2596 }
2597 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2598 {
2599 uint8_t buf[2];
2600 if ((retval = image_read_section(xscale->trace.image, section,
2601 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2602 2, buf, &size_read)) != ERROR_OK)
2603 {
2604 LOG_ERROR("error while reading instruction: %i", retval);
2605 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2606 }
2607 opcode = target_buffer_get_u16(target, buf);
2608 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2609 }
2610 else
2611 {
2612 LOG_ERROR("BUG: unknown core state encountered");
2613 exit(-1);
2614 }
2615
2616 return ERROR_OK;
2617 }
2618
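/* Reassemble an indirect-branch target address from the four address bytes
 * preceding the branch message in the trace data (entry i-1 holds the least
 * significant byte, entry i-4 the most significant).
 */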
2619 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2620 int i, uint32_t *target)
2621 {
2622 /* if there are fewer than four entries prior to the indirect branch message
2623 * we can't extract the address */
2624 if (i < 4)
2625 {
2626 return -1;
2627 }
2628
2629 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2630 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2631
2632 return 0;
2633 }
2634
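/* Walk the collected trace data and reconstruct the instruction flow. The
 * high nibble of each trace message encodes its type (0..7 exception, 8
 * direct branch, 9 indirect branch, 12/13 checkpointed branches, 15
 * roll-over) and the low nibble counts instructions executed since the
 * previous message; instructions are disassembled from the trace image and
 * printed to the command context.
 */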
2635 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2636 {
2637 struct xscale_common *xscale = target_to_xscale(target);
2638 int next_pc_ok = 0;
2639 uint32_t next_pc = 0x0;
2640 struct xscale_trace_data *trace_data = xscale->trace.data;
2641 int retval;
2642
2643 while (trace_data)
2644 {
2645 int i, chkpt;
2646 int rollover;
2647 int branch;
2648 int exception;
2649 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2650
2651 chkpt = 0;
2652 rollover = 0;
2653
2654 for (i = 0; i < trace_data->depth; i++)
2655 {
2656 next_pc_ok = 0;
2657 branch = 0;
2658 exception = 0;
2659
2660 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2661 continue;
2662
2663 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2664 {
2665 case 0: /* Exceptions */
2666 case 1:
2667 case 2:
2668 case 3:
2669 case 4:
2670 case 5:
2671 case 6:
2672 case 7:
2673 exception = (trace_data->entries[i].data & 0x70) >> 4;
2674 next_pc_ok = 1;
2675 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2676 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2677 break;
2678 case 8: /* Direct Branch */
2679 branch = 1;
2680 break;
2681 case 9: /* Indirect Branch */
2682 branch = 1;
2683 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2684 {
2685 next_pc_ok = 1;
2686 }
2687 break;
2688 case 13: /* Checkpointed Indirect Branch */
2689 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2690 {
2691 next_pc_ok = 1;
2692 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2693 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2694 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2695 }
2696 /* explicit fall-through */
2697 case 12: /* Checkpointed Direct Branch */
2698 branch = 1;
2699 if (chkpt == 0)
2700 {
2701 next_pc_ok = 1;
2702 next_pc = trace_data->chkpt0;
2703 chkpt++;
2704 }
2705 else if (chkpt == 1)
2706 {
2707 next_pc_ok = 1;
2708 next_pc = trace_data->chkpt1;
2709 chkpt++;
2710 }
2711 else
2712 {
2713 LOG_WARNING("more than two checkpointed branches encountered");
2714 }
2715 break;
2716 case 15: /* Roll-over */
2717 rollover++;
2718 continue;
2719 default: /* Reserved */
2720 command_print(cmd_ctx, "--- reserved trace message ---");
2721 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2722 return ERROR_OK;
2723 }
2724
2725 if (xscale->trace.pc_ok)
2726 {
2727 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2728 struct arm_instruction instruction;
2729
2730 if ((exception == 6) || (exception == 7))
2731 {
2732 /* IRQ or FIQ exception, no instruction executed */
2733 executed -= 1;
2734 }
2735
2736 while (executed-- >= 0)
2737 {
2738 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2739 {
2740 /* can't continue tracing with no image available */
2741 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2742 {
2743 return retval;
2744 }
2745 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2746 {
2747 /* TODO: handle incomplete images */
2748 }
2749 }
2750
2751 /* a precise abort on a load to the PC is included in the incremental
2752 * word count, other instructions causing data aborts are not included
2753 */
2754 if ((executed == 0) && (exception == 4)
2755 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2756 {
2757 if ((instruction.type == ARM_LDM)
2758 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2759 {
2760 executed--;
2761 }
2762 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2763 && (instruction.info.load_store.Rd != 15))
2764 {
2765 executed--;
2766 }
2767 }
2768
2769 /* only the last instruction executed
2770 * (the one that caused the control flow change)
2771 * could be a taken branch
2772 */
2773 if (((executed == -1) && (branch == 1)) &&
2774 (((instruction.type == ARM_B) ||
2775 (instruction.type == ARM_BL) ||
2776 (instruction.type == ARM_BLX)) &&
2777 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2778 {
2779 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2780 }
2781 else
2782 {
2783 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2784 }
2785 command_print(cmd_ctx, "%s", instruction.text);
2786 }
2787
2788 rollover = 0;
2789 }
2790
2791 if (next_pc_ok)
2792 {
2793 xscale->trace.current_pc = next_pc;
2794 xscale->trace.pc_ok = 1;
2795 }
2796 }
2797
2798 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2799 {
2800 struct arm_instruction instruction;
2801 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2802 {
2803 /* can't continue tracing with no image available */
2804 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2805 {
2806 return retval;
2807 }
2808 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2809 {
2810 /* TODO: handle incomplete images */
2811 }
2812 }
2813 command_print(cmd_ctx, "%s", instruction.text);
2814 }
2815
2816 trace_data = trace_data->next;
2817 }
2818
2819 return ERROR_OK;
2820 }
2821
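/* Build the register caches: the common ARMv4/5 core registers come from
 * armv4_5_build_reg_cache(), and a second cache is chained behind it for the
 * XScale debug registers, each entry backed by xscale_get_reg()/
 * xscale_set_reg() through the shared register arch-type.
 */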
2822 static void xscale_build_reg_cache(struct target *target)
2823 {
2824 struct xscale_common *xscale = target_to_xscale(target);
2825 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2826 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2827 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2828 int i;
2829 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(struct xscale_reg);
2830
2831 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2832 armv4_5->core_cache = (*cache_p);
2833
2834 /* register a register arch-type for XScale dbg registers only once */
2835 if (xscale_reg_arch_type == -1)
2836 xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
2837
2838 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2839 cache_p = &(*cache_p)->next;
2840
2841 /* fill in values for the xscale reg cache */
2842 (*cache_p)->name = "XScale registers";
2843 (*cache_p)->next = NULL;
2844 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2845 (*cache_p)->num_regs = num_regs;
2846
2847 for (i = 0; i < num_regs; i++)
2848 {
2849 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2850 (*cache_p)->reg_list[i].value = calloc(4, 1);
2851 (*cache_p)->reg_list[i].dirty = 0;
2852 (*cache_p)->reg_list[i].valid = 0;
2853 (*cache_p)->reg_list[i].size = 32;
2854 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2855 (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
2856 arch_info[i] = xscale_reg_arch_info[i];
2857 arch_info[i].target = target;
2858 }
2859
2860 xscale->reg_cache = (*cache_p);
2861 }
2862
2863 static int xscale_init_target(struct command_context *cmd_ctx,
2864 struct target *target)
2865 {
2866 xscale_build_reg_cache(target);
2867 return ERROR_OK;
2868 }
2869
2870 static int xscale_init_arch_info(struct target *target,
2871 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2872 {
2873 struct arm *armv4_5;
2874 uint32_t high_reset_branch, low_reset_branch;
2875 int i;
2876
2877 armv4_5 = &xscale->armv4_5_common;
2878
2879 /* store architecture specific data (none so far) */
2880 xscale->common_magic = XSCALE_COMMON_MAGIC;
2881
2882 /* we don't really *need* variant info ... */
2883 if (variant) {
2884 int ir_length = 0;
2885
2886 if (strcmp(variant, "pxa250") == 0
2887 || strcmp(variant, "pxa255") == 0
2888 || strcmp(variant, "pxa26x") == 0)
2889 ir_length = 5;
2890 else if (strcmp(variant, "pxa27x") == 0
2891 || strcmp(variant, "ixp42x") == 0
2892 || strcmp(variant, "ixp45x") == 0
2893 || strcmp(variant, "ixp46x") == 0)
2894 ir_length = 7;
2895 else
2896 LOG_WARNING("%s: unrecognized variant %s",
2897 tap->dotted_name, variant);
2898
2899 if (ir_length && ir_length != tap->ir_length) {
2900 LOG_WARNING("%s: IR length for %s is %d; fixing",
2901 tap->dotted_name, variant, ir_length);
2902 tap->ir_length = ir_length;
2903 }
2904 }
2905
2906 /* the debug handler isn't installed (and thus not running) at this time */
2907 xscale->handler_address = 0xfe000800;
2908
2909 /* clear the vectors we keep locally for reference */
2910 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2911 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2912
2913 /* no user-specified vectors have been configured yet */
2914 xscale->static_low_vectors_set = 0x0;
2915 xscale->static_high_vectors_set = 0x0;
2916
2917 /* calculate branches to debug handler */
2918 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2919 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2920
2921 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2922 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2923
2924 for (i = 1; i <= 7; i++)
2925 {
2926 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2927 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2928 }
2929
2930 /* 64kB aligned region used for DCache cleaning */
2931 xscale->cache_clean_address = 0xfffe0000;
2932
2933 xscale->hold_rst = 0;
2934 xscale->external_debug_break = 0;
2935
2936 xscale->ibcr_available = 2;
2937 xscale->ibcr0_used = 0;
2938 xscale->ibcr1_used = 0;
2939
2940 xscale->dbr_available = 2;
2941 xscale->dbr0_used = 0;
2942 xscale->dbr1_used = 0;
2943
2944 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2945 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2946
2947 xscale->vector_catch = 0x1;
2948
2949 xscale->trace.capture_status = TRACE_IDLE;
2950 xscale->trace.data = NULL;
2951 xscale->trace.image = NULL;
2952 xscale->trace.buffer_enabled = 0;
2953 xscale->trace.buffer_fill = 0;
2954
2955 /* prepare ARMv4/5 specific information */
2956 armv4_5->arch_info = xscale;
2957 armv4_5->read_core_reg = xscale_read_core_reg;
2958 armv4_5->write_core_reg = xscale_write_core_reg;
2959 armv4_5->full_context = xscale_full_context;
2960
2961 armv4_5_init_arch_info(target, armv4_5);
2962
2963 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
2964 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
2965 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
2966 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
2967 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
2968 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
2969 xscale->armv4_5_mmu.has_tiny_pages = 1;
2970 xscale->armv4_5_mmu.mmu_enabled = 0;
2971
2972 return ERROR_OK;
2973 }
2974
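/* Allocate and initialize the per-target XScale state, after verifying that
 * the debug handler binary still fits within the 2kB limit enforced here.
 */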
2975 static int xscale_target_create(struct target *target, Jim_Interp *interp)
2976 {
2977 struct xscale_common *xscale;
2978
2979 if (sizeof xscale_debug_handler - 1 > 0x800) {
2980 LOG_ERROR("debug_handler.bin: larger than 2kb");
2981 return ERROR_FAIL;
2982 }
2983
2984 xscale = calloc(1, sizeof(*xscale));
2985 if (!xscale)
2986 return ERROR_FAIL;
2987
2988 return xscale_init_arch_info(target, xscale, target->tap,
2989 target->variant);
2990 }
2991
2992 COMMAND_HANDLER(xscale_handle_debug_handler_command)
2993 {
2994 struct target *target = NULL;
2995 struct xscale_common *xscale;
2996 int retval;
2997 uint32_t handler_address;
2998
2999 if (argc < 2)
3000 {
3001 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3002 return ERROR_OK;
3003 }
3004
3005 if ((target = get_target(args[0])) == NULL)
3006 {
3007 LOG_ERROR("target '%s' not defined", args[0]);
3008 return ERROR_FAIL;
3009 }
3010
3011 xscale = target_to_xscale(target);
3012 retval = xscale_verify_pointer(cmd_ctx, xscale);
3013 if (retval != ERROR_OK)
3014 return retval;
3015
3016 COMMAND_PARSE_NUMBER(u32, args[1], handler_address);
3017
3018 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3019 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3020 {
3021 xscale->handler_address = handler_address;
3022 }
3023 else
3024 {
3025 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3026 return ERROR_FAIL;
3027 }
3028
3029 return ERROR_OK;
3030 }
3031
3032 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3033 {
3034 struct target *target = NULL;
3035 struct xscale_common *xscale;
3036 int retval;
3037 uint32_t cache_clean_address;
3038
3039 if (argc < 2)
3040 {
3041 return ERROR_COMMAND_SYNTAX_ERROR;
3042 }
3043
3044 target = get_target(args[0]);
3045 if (target == NULL)
3046 {
3047 LOG_ERROR("target '%s' not defined", args[0]);
3048 return ERROR_FAIL;
3049 }
3050 xscale = target_to_xscale(target);
3051 retval = xscale_verify_pointer(cmd_ctx, xscale);
3052 if (retval != ERROR_OK)
3053 return retval;
3054
3055 COMMAND_PARSE_NUMBER(u32, args[1], cache_clean_address);
3056
3057 if (cache_clean_address & 0xffff)
3058 {
3059 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3060 }
3061 else
3062 {
3063 xscale->cache_clean_address = cache_clean_address;
3064 }
3065
3066 return ERROR_OK;
3067 }
3068
3069 COMMAND_HANDLER(xscale_handle_cache_info_command)
3070 {
3071 struct target *target = get_current_target(cmd_ctx);
3072 struct xscale_common *xscale = target_to_xscale(target);
3073 int retval;
3074
3075 retval = xscale_verify_pointer(cmd_ctx, xscale);
3076 if (retval != ERROR_OK)
3077 return retval;
3078
3079 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
3080 }
3081
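/* Translate a virtual address to a physical one via the generic
 * armv4_5_mmu_translate_va() helper, which presumably walks the translation
 * table using the get_ttb and memory-access callbacks installed in
 * xscale_init_arch_info().
 */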
3082 static int xscale_virt2phys(struct target *target,
3083 uint32_t virtual, uint32_t *physical)
3084 {
3085 struct xscale_common *xscale = target_to_xscale(target);
3086 int type;
3087 uint32_t cb;
3088 int domain;
3089 uint32_t ap;
3090
3091 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3092 LOG_ERROR(xscale_not);
3093 return ERROR_TARGET_INVALID;
3094 }
3095
3096 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3097 if (type == -