ARM: add arm_mode_name()
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include "time_support.h"
37 #include "register.h"
38 #include "image.h"
39
40
41 /*
42 * Important XScale documents available as of October 2009 include:
43 *
44 * Intel XScale® Core Developer’s Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
48 *
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
56 *
57 * Chip-specific microarchitecture documents may also be useful.
58 */
59
60
61 /* forward declarations */
62 static int xscale_resume(struct target *, int current,
63 uint32_t address, int handle_breakpoints, int debug_execution);
64 static int xscale_debug_entry(struct target *);
65 static int xscale_restore_context(struct target *);
66 static int xscale_get_reg(struct reg *reg);
67 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
68 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
69 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
70 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_read_trace(struct target *);
72
73
74 /* This XScale "debug handler" is loaded into the processor's
75 * mini-ICache, which is 2K of code writable only via JTAG.
76 *
77 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
78 * binary files cleanly. It's string oriented, and terminates them
79 * with a NUL character. Better would be to generate the constants
80 * and let other code decide names, scoping, and other housekeeping.
81 */
82 static /* unsigned const char xscale_debug_handler[] = ... */
83 #include "xscale_debug.h"
84
/* Display names for the XScale-specific registers held in the OpenOCD
 * register cache.  Order must stay in sync with the XSCALE_* indices
 * and with xscale_reg_arch_info[] below (the decade comments mark
 * every tenth entry as a cross-check).
 */
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID",		/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",			/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",			/* 20 */
	"XSCALE_TXRXCTRL",
};
110
/* Per-register backing info, parallel to xscale_reg_list[].  The first
 * field is the register number used when accessing the register through
 * the debug handler; -1 marks registers that are reached purely via
 * dedicated JTAG data registers (see per-entry comments).
 */
static const struct xscale_reg xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
	{-1, NULL}, /* TX accessed via JTAG */
	{-1, NULL}, /* RX accessed via JTAG */
	{-1, NULL}, /* TXRXCTRL implicit access via JTAG */
};
136
137 /* convenience wrapper to access XScale specific registers */
/* convenience wrapper: write a host-order uint32_t through the generic
 * byte-buffer based xscale_set_reg() */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t value_buf[4];

	buf_set_u32(value_buf, 0, 32, value);
	return xscale_set_reg(reg, value_buf);
}
146
147 static const char xscale_not[] = "target is not an XScale";
148
149 static int xscale_verify_pointer(struct command_context *cmd_ctx,
150 struct xscale_common *xscale)
151 {
152 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
153 command_print(cmd_ctx, xscale_not);
154 return ERROR_TARGET_INVALID;
155 }
156 return ERROR_OK;
157 }
158
/* Queue an IR scan to select the given XScale JTAG instruction,
 * skipping the scan entirely if the TAP already holds that instruction.
 *
 * NOTE(review): 'scratch' is a stack buffer handed to
 * jtag_add_ir_scan() before this function returns; this assumes the
 * JTAG layer copies out_value when queueing the scan -- confirm
 * against the jtag_add_ir_scan() contract.
 */
static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
{
	if (tap == NULL)
		return ERROR_FAIL;

	/* only scan if the currently latched instruction differs */
	if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
	{
		struct scan_field field;
		uint8_t scratch[4];

		memset(&field, 0, sizeof field);
		field.tap = tap;
		field.num_bits = tap->ir_length;
		field.out_value = scratch;
		buf_set_u32(field.out_value, 0, field.num_bits, new_instr);

		jtag_add_ir_scan(1, &field, jtag_get_end_state());
	}

	return ERROR_OK;
}
180
/* Read the Debug Control and Status Register via the SELDCSR JTAG data
 * register and latch it into the cached XSCALE_DCSR register.
 *
 * The scan has three fields:
 *   fields[0]: 3 bits  -- drives hold_rst (bit 1) and
 *              external_debug_break (bit 2) out to the core
 *   fields[1]: 32 bits -- DCSR contents (captured into the reg cache)
 *   fields[2]: 1 bit   -- trailing control bit
 * After the capture, the same DCSR value is scanned back out in a
 * second pass (see inline comment below).
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	/* first pass ends in Pause-DR so the second scan can follow */
	jtag_set_end_state(TAP_DRPAUSE);
	xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);

	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* verify the captured control bits against their expected values */
	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	/* cache now holds the live DCSR value */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_set_end_state(TAP_IDLE);

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
246
247
/* JTAG completion callback: convert a just-captured 32-bit scan buffer
 * into a host-order uint32_t, in place.  'arg' is the address of the
 * 4-byte capture buffer that was passed to jtag_add_callback().
 * NOTE(review): the in-place write through a uint32_t pointer assumes
 * the buffer is suitably aligned -- it comes from a malloc'd uint32_t
 * array in xscale_receive(), so that holds for the current caller.
 */
static void xscale_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;
	*((uint32_t *)in) = buf_get_u32(in, 0, 32);
}
253
254 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
255 {
256 if (num_words == 0)
257 return ERROR_INVALID_ARGUMENTS;
258
259 int retval = ERROR_OK;
260 tap_state_t path[3];
261 struct scan_field fields[3];
262 uint8_t *field0 = malloc(num_words * 1);
263 uint8_t field0_check_value = 0x2;
264 uint8_t field0_check_mask = 0x6;
265 uint32_t *field1 = malloc(num_words * 4);
266 uint8_t field2_check_value = 0x0;
267 uint8_t field2_check_mask = 0x1;
268 int words_done = 0;
269 int words_scheduled = 0;
270 int i;
271
272 path[0] = TAP_DRSELECT;
273 path[1] = TAP_DRCAPTURE;
274 path[2] = TAP_DRSHIFT;
275
276 memset(&fields, 0, sizeof fields);
277
278 fields[0].tap = target->tap;
279 fields[0].num_bits = 3;
280 fields[0].check_value = &field0_check_value;
281 fields[0].check_mask = &field0_check_mask;
282
283 fields[1].tap = target->tap;
284 fields[1].num_bits = 32;
285
286 fields[2].tap = target->tap;
287 fields[2].num_bits = 1;
288 fields[2].check_value = &field2_check_value;
289 fields[2].check_mask = &field2_check_mask;
290
291 jtag_set_end_state(TAP_IDLE);
292 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
293 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
294
295 /* repeat until all words have been collected */
296 int attempts = 0;
297 while (words_done < num_words)
298 {
299 /* schedule reads */
300 words_scheduled = 0;
301 for (i = words_done; i < num_words; i++)
302 {
303 fields[0].in_value = &field0[i];
304
305 jtag_add_pathmove(3, path);
306
307 fields[1].in_value = (uint8_t *)(field1 + i);
308
309 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
310
311 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
312
313 words_scheduled++;
314 }
315
316 if ((retval = jtag_execute_queue()) != ERROR_OK)
317 {
318 LOG_ERROR("JTAG error while receiving data from debug handler");
319 break;
320 }
321
322 /* examine results */
323 for (i = words_done; i < num_words; i++)
324 {
325 if (!(field0[0] & 1))
326 {
327 /* move backwards if necessary */
328 int j;
329 for (j = i; j < num_words - 1; j++)
330 {
331 field0[j] = field0[j + 1];
332 field1[j] = field1[j + 1];
333 }
334 words_scheduled--;
335 }
336 }
337 if (words_scheduled == 0)
338 {
339 if (attempts++==1000)
340 {
341 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
342 retval = ERROR_TARGET_TIMEOUT;
343 break;
344 }
345 }
346
347 words_done += words_scheduled;
348 }
349
350 for (i = 0; i < num_words; i++)
351 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
352
353 free(field1);
354
355 return retval;
356 }
357
/* Read the debug handler's TX register (target -> host channel).
 *
 * If 'consume' is nonzero the scan path goes straight
 * Capture-DR -> Shift-DR, which clears TX_READY; otherwise the path
 * detours through Pause-DR so the register content stays valid and can
 * be re-read later.
 *
 * Polls for up to one second.  Returns ERROR_OK with the value latched
 * into the cached XSCALE_TX register,
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no data was available, or
 * ERROR_TARGET_TIMEOUT on timeout (also returned on JTAG errors).
 */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);

	/* consuming path: Capture-DR directly into Shift-DR */
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	/* non-consuming path: detour via Exit1/Pause/Exit2 */
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* one-second polling deadline */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* leave the loop unless we are consuming and TX is not yet ready */
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
	done:

	/* TX valid bit clear: nothing was available to read */
	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
455
/* Write the cached XSCALE_RX register value to the debug handler
 * (host -> target channel).
 *
 * Polls (up to one second) until the handler has consumed the previous
 * RX word (bit 0 of the captured status field reads 0), then performs
 * a final scan with field2 = 1 to set rx_valid, handing the new word
 * to the handler.
 */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* one-second polling deadline */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* previous word consumed -- safe to commit the new one */
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
	done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
540
/* send count elements of size byte to the debug handler */
/* Streams raw data into the DBGRX data register: each queued scan
 * drives t[0] = 0 (3 control bits), t[1] = one data element (32 bits,
 * assembled from 'buffer' according to target endianness), and
 * t[2] = 1 (the rx_valid bit).  'size' must be 4, 2 or 1; 'buffer'
 * advances by 'size' each iteration.
 * NOTE(review): unlike xscale_write_rx(), no per-word busy/ready
 * handshake is performed here -- this relies on the handler draining
 * RX quickly enough.
 */
static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
{
	uint32_t t[3];
	int bits[3];
	int retval;
	int done_count = 0;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);

	bits[0]=3;
	t[0]=0;
	bits[1]=32;
	t[2]=1;
	bits[2]=1;
	int endianness = target->endianness;
	while (done_count++ < count)
	{
		/* assemble one element into t[1] honoring target byte order */
		switch (size)
		{
		case 4:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u32(buffer);
			} else
			{
				t[1]=be_to_h_u32(buffer);
			}
			break;
		case 2:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u16(buffer);
			} else
			{
				t[1]=be_to_h_u16(buffer);
			}
			break;
		case 1:
			t[1]=buffer[0];
			break;
		default:
			LOG_ERROR("BUG: size neither 4, 2 nor 1");
			return ERROR_INVALID_ARGUMENTS;
		}
		jtag_add_dr_out(target->tap,
				3,
				bits,
				t,
				jtag_set_end_state(TAP_IDLE));
		buffer += size;
	}

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
604
605 static int xscale_send_u32(struct target *target, uint32_t value)
606 {
607 struct xscale_common *xscale = target_to_xscale(target);
608
609 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
610 return xscale_write_rx(target);
611 }
612
/* Write the cached DCSR value to the core via the SELDCSR JTAG data
 * register, optionally updating the driver's hold_rst /
 * external_debug_break state first (-1 leaves either unchanged).
 * The scan layout matches xscale_read_dcsr(): 3 control bits, 32 DCSR
 * bits, 1 trailing bit.
 */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	/* -1 means "keep current setting" */
	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);

	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* verify the captured control bits against their expected values */
	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* cache and hardware now agree */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
671
/* Compute the parity of a 32-bit word.
 *
 * XORs the upper halves down until only 4 significant bits remain,
 * then uses the constant 0x6996 as a 16-entry lookup table (bit v of
 * 0x6996 is the parity of the nibble value v).
 *
 * @param v word to examine
 * @return 0 if v has an even number of set bits, 1 if odd
 */
static unsigned int parity (unsigned int v)
{
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v &= 0xf;
	return (0x6996 >> v) & 1;
}
683
/* Load one 8-word (32-byte) cache line into the mini instruction cache
 * through the LDIC JTAG instruction.
 *
 * First scan: the 6-bit command 0x3 ("load Mini ICache line") plus the
 * 27-bit virtual cache-line address (va >> 5).  Then one 33-bit scan
 * per instruction word: 32 data bits followed by a parity bit.  All
 * scans are queued and executed together at the end.
 */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_LDIC);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].tap = target->tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* read the word back as host-order via memcpy (avoids
		 * aliasing issues) and compute its parity bit */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(2, fields, jtag_get_end_state());
	}

	return jtag_execute_queue();
}
738
/* Queue an LDIC "invalidate IC line" command (command code 0) for the
 * cache line containing va.
 * NOTE: the scan is only queued here -- no jtag_execute_queue() is
 * called; the caller is expected to flush the queue (e.g. a following
 * xscale_load_ic() does so).
 */
static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
{
	uint8_t packet[4];
	uint8_t cmd;
	struct scan_field fields[2];

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_LDIC);

	/* CMD for invalidate IC line b000, bits [6:4] b000 */
	buf_set_u32(&cmd, 0, 6, 0x0);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].tap = target->tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	return ERROR_OK;
}
768
/* Rebuild the low (0x0) and high (0xffff0000) exception vector tables
 * in the mini instruction cache.
 *
 * For vectors 1..7, a user-configured static vector overrides the
 * value read from target memory; failed memory reads fall back to a
 * self-branch (`b .`).  Vector 0 (reset) in both tables is replaced by
 * a branch into the debug handler at handler_address + 0x20.
 *
 * NOTE(review): the return values of the invalidate/load calls at the
 * bottom are ignored -- a JTAG failure there goes unreported.
 */
static int xscale_update_vectors(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int i;
	int retval;

	uint32_t low_reset_branch, high_reset_branch;

	for (i = 1; i < 8; i++)
	{
		/* if there's a static vector specified for this exception, override */
		if (xscale->static_high_vectors_set & (1 << i))
		{
			xscale->high_vectors[i] = xscale->static_high_vectors[i];
		}
		else
		{
			retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval != ERROR_OK)
			{
				/* Some of these reads will fail as part of normal execution */
				xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	for (i = 1; i < 8; i++)
	{
		if (xscale->static_low_vectors_set & (1 << i))
		{
			xscale->low_vectors[i] = xscale->static_low_vectors[i];
		}
		else
		{
			retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval != ERROR_OK)
			{
				/* Some of these reads will fail as part of normal execution */
				xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* invalidate and load exception vectors in mini i-cache */
	xscale_invalidate_ic_line(target, 0x0);
	xscale_invalidate_ic_line(target, 0xffff0000);

	xscale_load_ic(target, 0x0, xscale->low_vectors);
	xscale_load_ic(target, 0xffff0000, xscale->high_vectors);

	return ERROR_OK;
}
832
/* Print a one-shot summary of the halted target's state to the user:
 * core state, halt reason, mode, CPSR, PC, MMU/cache enables, and the
 * XScale-specific debug reason (reset / trace buffer full).
 */
static int xscale_arch_state(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;

	/* indexed by the mmu/cache enable flags (0 or 1) */
	static const char *state[] =
	{
		"disabled", "enabled"
	};

	/* indexed by enum xscale arch_debug_reason */
	static const char *arch_dbg_reason[] =
	{
		"", "\n(processor reset)", "\n(trace buffer full)"
	};

	if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
	{
		LOG_ERROR("BUG: called for a non-ARMv4/5 target");
		return ERROR_INVALID_ARGUMENTS;
	}

	LOG_USER("target halted in %s state due to %s, current mode: %s\n"
			"cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
			"MMU: %s, D-Cache: %s, I-Cache: %s"
			"%s",
			 armv4_5_state_strings[armv4_5->core_state],
			 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
			 arm_mode_name(armv4_5->core_mode),
			 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
			 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
			 state[xscale->armv4_5_mmu.mmu_enabled],
			 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
			 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
			 arch_dbg_reason[xscale->arch_debug_reason]);

	return ERROR_OK;
}
870
/* Periodic poll: detect a running target entering debug state.
 *
 * A successful non-consuming TX read means the debug handler is alive
 * and the core has halted, so full debug entry is performed.  A TX
 * error other than "no data" is treated as a communication failure and
 * the target is reported halted anyway so the user can reset it.
 */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
	{
		enum target_state previous_state = target->state;
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
		{

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		}
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		{
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be perfomed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
910
/* Handle entry into debug state.
 *
 * Drains the register dump the debug handler sends on entry
 * (r0, pc, r1-r7, cpsr, then the banked r8-r14 and, outside USR/SYS,
 * the SPSR), populates the register cache, decodes the DCSR
 * method-of-entry field to set the debug reason and apply the
 * mode-dependent PC fixup, refreshes cached MMU/cache state, and, when
 * tracing, collects trace data (possibly resuming immediately if more
 * trace is still being gathered).
 */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
	uint32_t pc;
	uint32_t buffer[10];
	int i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
	armv4_5->core_cache->reg_list[15].dirty = 1;
	armv4_5->core_cache->reg_list[15].valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
	armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
	armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* mode bits are CPSR[4:0]; reject garbage (communication error) */
	armv4_5->core_mode = buffer[9] & 0x1f;
	if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
		 arm_mode_name(armv4_5->core_mode));

	/* CPSR T bit selects Thumb vs ARM state */
	if (buffer[9] & 0x20)
		armv4_5->core_state = ARMV4_5_STATE_THUMB;
	else
		armv4_5->core_state = ARMV4_5_STATE_ARM;


	/* NOTE(review): redundant -- the same check already returned above */
	if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
		return ERROR_FAIL;

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
	{
		xscale_receive(target, buffer, 8);
		buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to register cache */
	for (i = 8; i <= 14; i++)
	{
		buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
	}

	/* examine debug reason */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	/* map DCSR method-of-entry to debug reason and PC adjustment */
	switch (moe)
	{
		case 0x0: /* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1: /* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2: /* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3: /* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4: /* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5: /* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6: /* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7: /* Reserved (may flag Hot-Debug support) */
		default:
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.buffer_enabled)
	{
		xscale_read_trace(target);
		xscale->trace.buffer_fill--;

		/* resume if we're still collecting trace data */
		if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
			&& (xscale->trace.buffer_fill > 0))
		{
			xscale_resume(target, 1, 0x0, 1, 0);
		}
		else
		{
			xscale->trace.buffer_enabled = 0;
		}
	}

	return ERROR_OK;
}
1091
1092 static int xscale_halt(struct target *target)
1093 {
1094 struct xscale_common *xscale = target_to_xscale(target);
1095
1096 LOG_DEBUG("target->state: %s",
1097 target_state_name(target));
1098
1099 if (target->state == TARGET_HALTED)
1100 {
1101 LOG_DEBUG("target was already halted");
1102 return ERROR_OK;
1103 }
1104 else if (target->state == TARGET_UNKNOWN)
1105 {
1106 /* this must not happen for a xscale target */
1107 LOG_ERROR("target was in unknown state when halt was requested");
1108 return ERROR_TARGET_INVALID;
1109 }
1110 else if (target->state == TARGET_RESET)
1111 {
1112 LOG_DEBUG("target->state == TARGET_RESET");
1113 }
1114 else
1115 {
1116 /* assert external dbg break */
1117 xscale->external_debug_break = 1;
1118 xscale_read_dcsr(target);
1119
1120 target->debug_reason = DBG_REASON_DBGRQ;
1121 }
1122
1123 return ERROR_OK;
1124 }
1125
1126 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1127 {
1128 struct xscale_common *xscale = target_to_xscale(target);
1129 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1130 int retval;
1131
1132 if (xscale->ibcr0_used)
1133 {
1134 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1135
1136 if (ibcr0_bp)
1137 {
1138 xscale_unset_breakpoint(target, ibcr0_bp);
1139 }
1140 else
1141 {
1142 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1143 exit(-1);
1144 }
1145 }
1146
1147 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1148 return retval;
1149
1150 return ERROR_OK;
1151 }
1152
1153 static int xscale_disable_single_step(struct target *target)
1154 {
1155 struct xscale_common *xscale = target_to_xscale(target);
1156 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1157 int retval;
1158
1159 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1160 return retval;
1161
1162 return ERROR_OK;
1163 }
1164
1165 static void xscale_enable_watchpoints(struct target *target)
1166 {
1167 struct watchpoint *watchpoint = target->watchpoints;
1168
1169 while (watchpoint)
1170 {
1171 if (watchpoint->set == 0)
1172 xscale_set_watchpoint(target, watchpoint);
1173 watchpoint = watchpoint->next;
1174 }
1175 }
1176
1177 static void xscale_enable_breakpoints(struct target *target)
1178 {
1179 struct breakpoint *breakpoint = target->breakpoints;
1180
1181 /* set any pending breakpoints */
1182 while (breakpoint)
1183 {
1184 if (breakpoint->set == 0)
1185 xscale_set_breakpoint(target, breakpoint);
1186 breakpoint = breakpoint->next;
1187 }
1188 }
1189
/**
 * Resume execution on a halted XScale target.
 *
 * @param current            1 = resume at the current PC; 0 = resume at @a address
 * @param address            resume address, used only when @a current is 0
 * @param handle_breakpoints nonzero = transparently step over a breakpoint
 *                           located at the resume PC before resuming
 * @param debug_execution    nonzero = "debug" resume: working areas are kept,
 *                           state becomes TARGET_DEBUG_RUNNING
 * @return ERROR_OK, or an error from xscale_update_vectors()
 *
 * The resume itself is performed by handing the full register context back
 * to the on-chip debug handler: a resume command word, then CPSR, r7..r0
 * (descending), then PC.  NOTE(review): most helper calls below are not
 * error-checked; failures after vector update are silently ignored.
 */
static int xscale_resume(struct target *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
	struct breakpoint *breakpoint = target->breakpoints;
	uint32_t current_pc;
	int retval;
	int i;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* a normal resume gives the working areas back to the target */
	if (!debug_execution)
	{
		target_free_all_working_areas(target);
	}

	/* update vector tables */
	if ((retval = xscale_update_vectors(target)) != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0)
	{
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
		{
			uint32_t next_pc;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
			{
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
			}

			/* hardware single-step via IBCR0 at the computed next PC */
			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			xscale_restore_context(target);

			/* send resume request (command 0x30 or 0x31)
			 * clean the trace buffer if it is to be enabled (0x62) */
			if (xscale->trace.buffer_enabled)
			{
				xscale_send_u32(target, 0x62);
				xscale_send_u32(target, 0x31);
			}
			else
				xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));

			/* the debug handler expects r7..r0 in descending order */
			for (i = 7; i >= 0; i--)
			{
				/* send register */
				xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

			/* wait for and process debug entry */
			xscale_debug_entry(target);

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			/* put the breakpoint back, then fall through to the
			 * normal resume sequence below */
			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	xscale_restore_context(target);

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	}
	else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));

	/* the debug handler expects r7..r0 in descending order */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
	LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution)
	{
		/* registers are now invalid */
		armv4_5_invalidate_core_regs(target);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}
1342
/**
 * Execute exactly one instruction on a halted target.
 *
 * Arms IBCR0 at the simulated next PC, downloads the register context to
 * the debug handler (resume command, CPSR, r7..r0, PC), waits for re-entry
 * into debug state, then disarms the comparator.
 *
 * NOTE(review): the current/address/handle_breakpoints parameters are not
 * used here — PC adjustment and breakpoint handling are done by the
 * caller (xscale_step) before invoking this helper.
 *
 * @return ERROR_OK or the first error from any step of the sequence
 */
static int xscale_step_inner(struct target *target, int current,
		uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
	{
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
		return retval;

	/* restore banked registers */
	if ((retval = xscale_restore_context(target)) != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
			return retval;
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
			return retval;
	}
	else
	if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
		return retval;

	/* send CPSR */
	if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32))) != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));

	/* the debug handler expects r7..r0 in descending order */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
		return retval;
	LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	if ((retval = armv4_5_invalidate_core_regs(target)) != ERROR_OK)
		return retval;

	/* wait for and process debug entry */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1422
1423 static int xscale_step(struct target *target, int current,
1424 uint32_t address, int handle_breakpoints)
1425 {
1426 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1427 struct breakpoint *breakpoint = target->breakpoints;
1428
1429 uint32_t current_pc;
1430 int retval;
1431
1432 if (target->state != TARGET_HALTED)
1433 {
1434 LOG_WARNING("target not halted");
1435 return ERROR_TARGET_NOT_HALTED;
1436 }
1437
1438 /* current = 1: continue on current pc, otherwise continue at <address> */
1439 if (!current)
1440 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1441
1442 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1443
1444 /* if we're at the reset vector, we have to simulate the step */
1445 if (current_pc == 0x0)
1446 {
1447 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1448 return retval;
1449 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1450
1451 target->debug_reason = DBG_REASON_SINGLESTEP;
1452 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1453
1454 return ERROR_OK;
1455 }
1456
1457 /* the front-end may request us not to handle breakpoints */
1458 if (handle_breakpoints)
1459 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1460 {
1461 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1462 return retval;
1463 }
1464
1465 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1466
1467 if (breakpoint)
1468 {
1469 xscale_set_breakpoint(target, breakpoint);
1470 }
1471
1472 LOG_DEBUG("target stepped");
1473
1474 return ERROR_OK;
1475
1476 }
1477
/**
 * Assert SRST on the target (target_type hook).
 *
 * Programs DCSR for "Hold reset" + "Trap Reset" (halt mode) before
 * asserting SRST, so the core will stop in the debug handler when reset
 * is released.  DCSR is deselected (BYPASS) before the actual reset
 * because leaving it selected caused problems on PXA27x.
 *
 * @return ERROR_OK, or an error from target_halt() when reset_halt is set
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		  target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, 0x7f);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1518
/**
 * Deassert SRST and bring the target back up (target_type hook).
 *
 * After releasing reset this reloads the debug handler into the
 * mini-icache (halt-mode debugging runs the handler in "Special Debug
 * State"), reloads both exception vector tables, and finally releases
 * "Hold reset" so the core starts executing.  When reset_halt is not
 * requested, the resulting debug-handler entry is consumed and the
 * target is resumed immediately.
 *
 * Also resets the book-keeping for the two instruction (IBCR) and two
 * data (DBR) hardware comparators, since a reset cleared them on-chip.
 *
 * @return ERROR_OK or an error from xscale_load_ic()
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	armv4_5_invalidate_core_regs(target);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		/* the handler is streamed 32 bytes (one cache line) at a time */
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt)
		{
			uint32_t cache_line[8];
			unsigned i;

			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad the last line with 0xe1a08008 ("mov r8, r8", a no-op) */
			for (; i < 32; i += 4)
			{
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		};

		/* load the low (0x0) and high (0xffff0000) vector tables */
		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1652
/* Stub target_type hook: per-register reads are not implemented for
 * XScale.  Logs an error but still reports ERROR_OK to the caller. */
static int xscale_read_core_reg(struct target *target, int num,
		enum armv4_5_mode mode)
{
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1659
/* Stub target_type hook: per-register writes are not implemented for
 * XScale.  Logs an error but still reports ERROR_OK to the caller. */
static int xscale_write_core_reg(struct target *target, int num,
		enum armv4_5_mode mode, uint32_t value)
{
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1666
1667 static int xscale_full_context(struct target *target)
1668 {
1669 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1670
1671 uint32_t *buffer;
1672
1673 int i, j;
1674
1675 LOG_DEBUG("-");
1676
1677 if (target->state != TARGET_HALTED)
1678 {
1679 LOG_WARNING("target not halted");
1680 return ERROR_TARGET_NOT_HALTED;
1681 }
1682
1683 buffer = malloc(4 * 8);
1684
1685 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1686 * we can't enter User mode on an XScale (unpredictable),
1687 * but User shares registers with SYS
1688 */
1689 for (i = 1; i < 7; i++)
1690 {
1691 int valid = 1;
1692
1693 /* check if there are invalid registers in the current mode
1694 */
1695 for (j = 0; j <= 16; j++)
1696 {
1697 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1698 valid = 0;
1699 }
1700
1701 if (!valid)
1702 {
1703 uint32_t tmp_cpsr;
1704
1705 /* request banked registers */
1706 xscale_send_u32(target, 0x0);
1707
1708 tmp_cpsr = 0x0;
1709 tmp_cpsr |= armv4_5_number_to_mode(i);
1710 tmp_cpsr |= 0xc0; /* I/F bits */
1711
1712 /* send CPSR for desired mode */
1713 xscale_send_u32(target, tmp_cpsr);
1714
1715 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1716 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1717 {
1718 xscale_receive(target, buffer, 8);
1719 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1720 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1721 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1722 }
1723 else
1724 {
1725 xscale_receive(target, buffer, 7);
1726 }
1727
1728 /* move data from buffer to register cache */
1729 for (j = 8; j <= 14; j++)
1730 {
1731 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1732 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1733 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1734 }
1735 }
1736 }
1737
1738 free(buffer);
1739
1740 return ERROR_OK;
1741 }
1742
1743 static int xscale_restore_context(struct target *target)
1744 {
1745 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1746
1747 int i, j;
1748
1749 if (target->state != TARGET_HALTED)
1750 {
1751 LOG_WARNING("target not halted");
1752 return ERROR_TARGET_NOT_HALTED;
1753 }
1754
1755 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1756 * we can't enter User mode on an XScale (unpredictable),
1757 * but User shares registers with SYS
1758 */
1759 for (i = 1; i < 7; i++)
1760 {
1761 int dirty = 0;
1762
1763 /* check if there are invalid registers in the current mode
1764 */
1765 for (j = 8; j <= 14; j++)
1766 {
1767 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1768 dirty = 1;
1769 }
1770
1771 /* if not USR/SYS, check if the SPSR needs to be written */
1772 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1773 {
1774 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1775 dirty = 1;
1776 }
1777
1778 if (dirty)
1779 {
1780 uint32_t tmp_cpsr;
1781
1782 /* send banked registers */
1783 xscale_send_u32(target, 0x1);
1784
1785 tmp_cpsr = 0x0;
1786 tmp_cpsr |= armv4_5_number_to_mode(i);
1787 tmp_cpsr |= 0xc0; /* I/F bits */
1788
1789 /* send CPSR for desired mode */
1790 xscale_send_u32(target, tmp_cpsr);
1791
1792 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1793 for (j = 8; j <= 14; j++)
1794 {
1795 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, j).value, 0, 32));
1796 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1797 }
1798
1799 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1800 {
1801 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32));
1802 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1803 }
1804 }
1805 }
1806
1807 return ERROR_OK;
1808 }
1809
1810 static int xscale_read_memory(struct target *target, uint32_t address,
1811 uint32_t size, uint32_t count, uint8_t *buffer)
1812 {
1813 struct xscale_common *xscale = target_to_xscale(target);
1814 uint32_t *buf32;
1815 uint32_t i;
1816 int retval;
1817
1818 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1819
1820 if (target->state != TARGET_HALTED)
1821 {
1822 LOG_WARNING("target not halted");
1823 return ERROR_TARGET_NOT_HALTED;
1824 }
1825
1826 /* sanitize arguments */
1827 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1828 return ERROR_INVALID_ARGUMENTS;
1829
1830 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1831 return ERROR_TARGET_UNALIGNED_ACCESS;
1832
1833 /* send memory read request (command 0x1n, n: access size) */
1834 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1835 return retval;
1836
1837 /* send base address for read request */
1838 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1839 return retval;
1840
1841 /* send number of requested data words */
1842 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1843 return retval;
1844
1845 /* receive data from target (count times 32-bit words in host endianness) */
1846 buf32 = malloc(4 * count);
1847 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1848 return retval;
1849
1850 /* extract data from host-endian buffer into byte stream */
1851 for (i = 0; i < count; i++)
1852 {
1853 switch (size)
1854 {
1855 case 4:
1856 target_buffer_set_u32(target, buffer, buf32[i]);
1857 buffer += 4;
1858 break;
1859 case 2:
1860 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1861 buffer += 2;
1862 break;
1863 case 1:
1864 *buffer++ = buf32[i] & 0xff;
1865 break;
1866 default:
1867 LOG_ERROR("invalid read size");
1868 return ERROR_INVALID_ARGUMENTS;
1869 }
1870 }
1871
1872 free(buf32);
1873
1874 /* examine DCSR, to see if Sticky Abort (SA) got set */
1875 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1876 return retval;
1877 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1878 {
1879 /* clear SA bit */
1880 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1881 return retval;
1882
1883 return ERROR_TARGET_DATA_ABORT;
1884 }
1885
1886 return ERROR_OK;
1887 }
1888
1889 static int xscale_write_memory(struct target *target, uint32_t address,
1890 uint32_t size, uint32_t count, uint8_t *buffer)
1891 {
1892 struct xscale_common *xscale = target_to_xscale(target);
1893 int retval;
1894
1895 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1896
1897 if (target->state != TARGET_HALTED)
1898 {
1899 LOG_WARNING("target not halted");
1900 return ERROR_TARGET_NOT_HALTED;
1901 }
1902
1903 /* sanitize arguments */
1904 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1905 return ERROR_INVALID_ARGUMENTS;
1906
1907 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1908 return ERROR_TARGET_UNALIGNED_ACCESS;
1909
1910 /* send memory write request (command 0x2n, n: access size) */
1911 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1912 return retval;
1913
1914 /* send base address for read request */
1915 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1916 return retval;
1917
1918 /* send number of requested data words to be written*/
1919 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1920 return retval;
1921
1922 /* extract data from host-endian buffer into byte stream */
1923 #if 0
1924 for (i = 0; i < count; i++)
1925 {
1926 switch (size)
1927 {
1928 case 4:
1929 value = target_buffer_get_u32(target, buffer);
1930 xscale_send_u32(target, value);
1931 buffer += 4;
1932 break;
1933 case 2:
1934 value = target_buffer_get_u16(target, buffer);
1935 xscale_send_u32(target, value);
1936 buffer += 2;
1937 break;
1938 case 1:
1939 value = *buffer;
1940 xscale_send_u32(target, value);
1941 buffer += 1;
1942 break;
1943 default:
1944 LOG_ERROR("should never get here");
1945 exit(-1);
1946 }
1947 }
1948 #endif
1949 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1950 return retval;
1951
1952 /* examine DCSR, to see if Sticky Abort (SA) got set */
1953 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1954 return retval;
1955 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1956 {
1957 /* clear SA bit */
1958 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1959 return retval;
1960
1961 return ERROR_TARGET_DATA_ABORT;
1962 }
1963
1964 return ERROR_OK;
1965 }
1966
/* Bulk-write hook: XScale has no separate fast-download path here, so
 * this simply delegates to xscale_write_memory with 4-byte accesses. */
static int xscale_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}
1972
1973 static uint32_t xscale_get_ttb(struct target *target)
1974 {
1975 struct xscale_common *xscale = target_to_xscale(target);
1976 uint32_t ttb;
1977
1978 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1979 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1980
1981 return ttb;
1982 }
1983
1984 static void xscale_disable_mmu_caches(struct target *target, int mmu,
1985 int d_u_cache, int i_cache)
1986 {
1987 struct xscale_common *xscale = target_to_xscale(target);
1988 uint32_t cp15_control;
1989
1990 /* read cp15 control register */
1991 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1992 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1993
1994 if (mmu)
1995 cp15_control &= ~0x1U;
1996
1997 if (d_u_cache)
1998 {
1999 /* clean DCache */
2000 xscale_send_u32(target, 0x50);
2001 xscale_send_u32(target, xscale->cache_clean_address);
2002
2003 /* invalidate DCache */
2004 xscale_send_u32(target, 0x51);
2005
2006 cp15_control &= ~0x4U;
2007 }
2008
2009 if (i_cache)
2010 {
2011 /* invalidate ICache */
2012 xscale_send_u32(target, 0x52);
2013 cp15_control &= ~0x1000U;
2014 }
2015
2016 /* write new cp15 control register */
2017 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2018
2019 /* execute cpwait to ensure outstanding operations complete */
2020 xscale_send_u32(target, 0x53);
2021 }
2022
2023 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2024 int d_u_cache, int i_cache)
2025 {
2026 struct xscale_common *xscale = target_to_xscale(target);
2027 uint32_t cp15_control;
2028
2029 /* read cp15 control register */
2030 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2031 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2032
2033 if (mmu)
2034 cp15_control |= 0x1U;
2035
2036 if (d_u_cache)
2037 cp15_control |= 0x4U;
2038
2039 if (i_cache)
2040 cp15_control |= 0x1000U;
2041
2042 /* write new cp15 control register */
2043 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2044
2045 /* execute cpwait to ensure outstanding operations complete */
2046 xscale_send_u32(target, 0x53);
2047 }
2048
2049 static int xscale_set_breakpoint(struct target *target,
2050 struct breakpoint *breakpoint)
2051 {
2052 int retval;
2053 struct xscale_common *xscale = target_to_xscale(target);
2054
2055 if (target->state != TARGET_HALTED)
2056 {
2057 LOG_WARNING("target not halted");
2058 return ERROR_TARGET_NOT_HALTED;
2059 }
2060
2061 if (breakpoint->set)
2062 {
2063 LOG_WARNING("breakpoint already set");
2064 return ERROR_OK;
2065 }
2066
2067 if (breakpoint->type == BKPT_HARD)
2068 {
2069 uint32_t value = breakpoint->address | 1;
2070 if (!xscale->ibcr0_used)
2071 {
2072 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2073 xscale->ibcr0_used = 1;
2074 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2075 }
2076 else if (!xscale->ibcr1_used)
2077 {
2078 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2079 xscale->ibcr1_used = 1;
2080 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2081 }
2082 else
2083 {
2084 LOG_ERROR("BUG: no hardware comparator available");
2085 return ERROR_OK;
2086 }
2087 }
2088 else if (breakpoint->type == BKPT_SOFT)
2089 {
2090 if (breakpoint->length == 4)
2091 {
2092 /* keep the original instruction in target endianness */
2093 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2094 {
2095 return retval;
2096 }
2097 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2098 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2099 {
2100 return retval;
2101 }
2102 }
2103 else
2104 {
2105 /* keep the original instruction in target endianness */
2106 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2107 {
2108 return retval;
2109 }
2110 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2111 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2112 {
2113 return retval;
2114 }
2115 }
2116 breakpoint->set = 1;
2117 }
2118
2119 return ERROR_OK;
2120 }
2121
2122 static int xscale_add_breakpoint(struct target *target,
2123 struct breakpoint *breakpoint)
2124 {
2125 struct xscale_common *xscale = target_to_xscale(target);
2126
2127 if (target->state != TARGET_HALTED)
2128 {
2129 LOG_WARNING("target not halted");
2130 return ERROR_TARGET_NOT_HALTED;
2131 }
2132
2133 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2134 {
2135 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2136 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2137 }
2138
2139 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2140 {
2141 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2142 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2143 }
2144
2145 if (breakpoint->type == BKPT_HARD)
2146 {
2147 xscale->ibcr_available--;
2148 }
2149
2150 return ERROR_OK;
2151 }
2152
2153 static int xscale_unset_breakpoint(struct target *target,
2154 struct breakpoint *breakpoint)
2155 {
2156 int retval;
2157 struct xscale_common *xscale = target_to_xscale(target);
2158
2159 if (target->state != TARGET_HALTED)
2160 {
2161 LOG_WARNING("target not halted");
2162 return ERROR_TARGET_NOT_HALTED;
2163 }
2164
2165 if (!breakpoint->set)
2166 {
2167 LOG_WARNING("breakpoint not set");
2168 return ERROR_OK;
2169 }
2170
2171 if (breakpoint->type == BKPT_HARD)
2172 {
2173 if (breakpoint->set == 1)
2174 {
2175 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2176 xscale->ibcr0_used = 0;
2177 }
2178 else if (breakpoint->set == 2)
2179 {
2180 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2181 xscale->ibcr1_used = 0;
2182 }
2183 breakpoint->set = 0;
2184 }
2185 else
2186 {
2187 /* restore original instruction (kept in target endianness) */
2188 if (breakpoint->length == 4)
2189 {
2190 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2191 {
2192 return retval;
2193 }
2194 }
2195 else
2196 {
2197 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2198 {
2199 return retval;
2200 }
2201 }
2202 breakpoint->set = 0;
2203 }
2204
2205 return ERROR_OK;
2206 }
2207
2208 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2209 {
2210 struct xscale_common *xscale = target_to_xscale(target);
2211
2212 if (target->state != TARGET_HALTED)
2213 {
2214 LOG_WARNING("target not halted");
2215 return ERROR_TARGET_NOT_HALTED;
2216 }
2217
2218 if (breakpoint->set)
2219 {
2220 xscale_unset_breakpoint(target, breakpoint);
2221 }
2222
2223 if (breakpoint->type == BKPT_HARD)
2224 xscale->ibcr_available++;
2225
2226 return ERROR_OK;
2227 }
2228
2229 static int xscale_set_watchpoint(struct target *target,
2230 struct watchpoint *watchpoint)
2231 {
2232 struct xscale_common *xscale = target_to_xscale(target);
2233 uint8_t enable = 0;
2234 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2235 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2236
2237 if (target->state != TARGET_HALTED)
2238 {
2239 LOG_WARNING("target not halted");
2240 return ERROR_TARGET_NOT_HALTED;
2241 }
2242
2243 xscale_get_reg(dbcon);
2244
2245 switch (watchpoint->rw)
2246 {
2247 case WPT_READ:
2248 enable = 0x3;
2249 break;
2250 case WPT_ACCESS:
2251 enable = 0x2;
2252 break;
2253 case WPT_WRITE:
2254 enable = 0x1;
2255 break;
2256 default:
2257 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2258 }
2259
2260 if (!xscale->dbr0_used)
2261 {
2262 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2263 dbcon_value |= enable;
2264 xscale_set_reg_u32(dbcon, dbcon_value);
2265 watchpoint->set = 1;
2266 xscale->dbr0_used = 1;
2267 }
2268 else if (!xscale->dbr1_used)
2269 {
2270 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2271 dbcon_value |= enable << 2;
2272 xscale_set_reg_u32(dbcon, dbcon_value);
2273 watchpoint->set = 2;
2274 xscale->dbr1_used = 1;
2275 }
2276 else
2277 {
2278 LOG_ERROR("BUG: no hardware comparator available");
2279 return ERROR_OK;
2280 }
2281
2282 return ERROR_OK;
2283 }
2284
2285 static int xscale_add_watchpoint(struct target *target,
2286 struct watchpoint *watchpoint)
2287 {
2288 struct xscale_common *xscale = target_to_xscale(target);
2289
2290 if (target->state != TARGET_HALTED)
2291 {
2292 LOG_WARNING("target not halted");
2293 return ERROR_TARGET_NOT_HALTED;
2294 }
2295
2296 if (xscale->dbr_available < 1)
2297 {
2298 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2299 }
2300
2301 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2302 {
2303 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2304 }
2305
2306 xscale->dbr_available--;
2307
2308 return ERROR_OK;
2309 }
2310
2311 static int xscale_unset_watchpoint(struct target *target,
2312 struct watchpoint *watchpoint)
2313 {
2314 struct xscale_common *xscale = target_to_xscale(target);
2315 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2316 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2317
2318 if (target->state != TARGET_HALTED)
2319 {
2320 LOG_WARNING("target not halted");
2321 return ERROR_TARGET_NOT_HALTED;
2322 }
2323
2324 if (!watchpoint->set)
2325 {
2326 LOG_WARNING("breakpoint not set");
2327 return ERROR_OK;
2328 }
2329
2330 if (watchpoint->set == 1)
2331 {
2332 dbcon_value &= ~0x3;
2333 xscale_set_reg_u32(dbcon, dbcon_value);
2334 xscale->dbr0_used = 0;
2335 }
2336 else if (watchpoint->set == 2)
2337 {
2338 dbcon_value &= ~0xc;
2339 xscale_set_reg_u32(dbcon, dbcon_value);
2340 xscale->dbr1_used = 0;
2341 }
2342 watchpoint->set = 0;
2343
2344 return ERROR_OK;
2345 }
2346
2347 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2348 {
2349 struct xscale_common *xscale = target_to_xscale(target);
2350
2351 if (target->state != TARGET_HALTED)
2352 {
2353 LOG_WARNING("target not halted");
2354 return ERROR_TARGET_NOT_HALTED;
2355 }
2356
2357 if (watchpoint->set)
2358 {
2359 xscale_unset_watchpoint(target, watchpoint);
2360 }
2361
2362 xscale->dbr_available++;
2363
2364 return ERROR_OK;
2365 }
2366
2367 static int xscale_get_reg(struct reg *reg)
2368 {
2369 struct xscale_reg *arch_info = reg->arch_info;
2370 struct target *target = arch_info->target;
2371 struct xscale_common *xscale = target_to_xscale(target);
2372
2373 /* DCSR, TX and RX are accessible via JTAG */
2374 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2375 {
2376 return xscale_read_dcsr(arch_info->target);
2377 }
2378 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2379 {
2380 /* 1 = consume register content */
2381 return xscale_read_tx(arch_info->target, 1);
2382 }
2383 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2384 {
2385 /* can't read from RX register (host -> debug handler) */
2386 return ERROR_OK;
2387 }
2388 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2389 {
2390 /* can't (explicitly) read from TXRXCTRL register */
2391 return ERROR_OK;
2392 }
2393 else /* Other DBG registers have to be transfered by the debug handler */
2394 {
2395 /* send CP read request (command 0x40) */
2396 xscale_send_u32(target, 0x40);
2397
2398 /* send CP register number */
2399 xscale_send_u32(target, arch_info->dbg_handler_number);
2400
2401 /* read register value */
2402 xscale_read_tx(target, 1);
2403 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2404
2405 reg->dirty = 0;
2406 reg->valid = 1;
2407 }
2408
2409 return ERROR_OK;
2410 }
2411
2412 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2413 {
2414 struct xscale_reg *arch_info = reg->arch_info;
2415 struct target *target = arch_info->target;
2416 struct xscale_common *xscale = target_to_xscale(target);
2417 uint32_t value = buf_get_u32(buf, 0, 32);
2418
2419 /* DCSR, TX and RX are accessible via JTAG */
2420 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2421 {
2422 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2423 return xscale_write_dcsr(arch_info->target, -1, -1);
2424 }
2425 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2426 {
2427 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2428 return xscale_write_rx(arch_info->target);
2429 }
2430 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2431 {
2432 /* can't write to TX register (debug-handler -> host) */
2433 return ERROR_OK;
2434 }
2435 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2436 {
2437 /* can't (explicitly) write to TXRXCTRL register */
2438 return ERROR_OK;
2439 }
2440 else /* Other DBG registers have to be transfered by the debug handler */
2441 {
2442 /* send CP write request (command 0x41) */
2443 xscale_send_u32(target, 0x41);
2444
2445 /* send CP register number */
2446 xscale_send_u32(target, arch_info->dbg_handler_number);
2447
2448 /* send CP register value */
2449 xscale_send_u32(target, value);
2450 buf_set_u32(reg->value, 0, 32, value);
2451 }
2452
2453 return ERROR_OK;
2454 }
2455
2456 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2457 {
2458 struct xscale_common *xscale = target_to_xscale(target);
2459 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2460 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2461
2462 /* send CP write request (command 0x41) */
2463 xscale_send_u32(target, 0x41);
2464
2465 /* send CP register number */
2466 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2467
2468 /* send CP register value */
2469 xscale_send_u32(target, value);
2470 buf_set_u32(dcsr->value, 0, 32, value);
2471
2472 return ERROR_OK;
2473 }
2474
2475 static int xscale_read_trace(struct target *target)
2476 {
2477 struct xscale_common *xscale = target_to_xscale(target);
2478 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2479 struct xscale_trace_data **trace_data_p;
2480
2481 /* 258 words from debug handler
2482 * 256 trace buffer entries
2483 * 2 checkpoint addresses
2484 */
2485 uint32_t trace_buffer[258];
2486 int is_address[256];
2487 int i, j;
2488
2489 if (target->state != TARGET_HALTED)
2490 {
2491 LOG_WARNING("target must be stopped to read trace data");
2492 return ERROR_TARGET_NOT_HALTED;
2493 }
2494
2495 /* send read trace buffer command (command 0x61) */
2496 xscale_send_u32(target, 0x61);
2497
2498 /* receive trace buffer content */
2499 xscale_receive(target, trace_buffer, 258);
2500
2501 /* parse buffer backwards to identify address entries */
2502 for (i = 255; i >= 0; i--)
2503 {
2504 is_address[i] = 0;
2505 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2506 ((trace_buffer[i] & 0xf0) == 0xd0))
2507 {
2508 if (i >= 3)
2509 is_address[--i] = 1;
2510 if (i >= 2)
2511 is_address[--i] = 1;
2512 if (i >= 1)
2513 is_address[--i] = 1;
2514 if (i >= 0)
2515 is_address[--i] = 1;
2516 }
2517 }
2518
2519
2520 /* search first non-zero entry */
2521 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2522 ;
2523
2524 if (j == 256)
2525 {
2526 LOG_DEBUG("no trace data collected");
2527 return ERROR_XSCALE_NO_TRACE_DATA;
2528 }
2529
2530 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2531 ;
2532
2533 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2534 (*trace_data_p)->next = NULL;
2535 (*trace_data_p)->chkpt0 = trace_buffer[256];
2536 (*trace_data_p)->chkpt1 = trace_buffer[257];
2537 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2538 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2539 (*trace_data_p)->depth = 256 - j;
2540
2541 for (i = j; i < 256; i++)
2542 {
2543 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2544 if (is_address[i])
2545 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2546 else
2547 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2548 }
2549
2550 return ERROR_OK;
2551 }
2552
2553 static int xscale_read_instruction(struct target *target,
2554 struct arm_instruction *instruction)
2555 {
2556 struct xscale_common *xscale = target_to_xscale(target);
2557 int i;
2558 int section = -1;
2559 size_t size_read;
2560 uint32_t opcode;
2561 int retval;
2562
2563 if (!xscale->trace.image)
2564 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2565
2566 /* search for the section the current instruction belongs to */
2567 for (i = 0; i < xscale->trace.image->num_sections; i++)
2568 {
2569 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2570 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2571 {
2572 section = i;
2573 break;
2574 }
2575 }
2576
2577 if (section == -1)
2578 {
2579 /* current instruction couldn't be found in the image */
2580 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2581 }
2582
2583 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2584 {
2585 uint8_t buf[4];
2586 if ((retval = image_read_section(xscale->trace.image, section,
2587 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2588 4, buf, &size_read)) != ERROR_OK)
2589 {
2590 LOG_ERROR("error while reading instruction: %i", retval);
2591 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2592 }
2593 opcode = target_buffer_get_u32(target, buf);
2594 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2595 }
2596 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2597 {
2598 uint8_t buf[2];
2599 if ((retval = image_read_section(xscale->trace.image, section,
2600 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2601 2, buf, &size_read)) != ERROR_OK)
2602 {
2603 LOG_ERROR("error while reading instruction: %i", retval);
2604 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2605 }
2606 opcode = target_buffer_get_u16(target, buf);
2607 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2608 }
2609 else
2610 {
2611 LOG_ERROR("BUG: unknown core state encountered");
2612 exit(-1);
2613 }
2614
2615 return ERROR_OK;
2616 }
2617
2618 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2619 int i, uint32_t *target)
2620 {
2621 /* if there are less than four entries prior to the indirect branch message
2622 * we can't extract the address */
2623 if (i < 4)
2624 {
2625 return -1;
2626 }
2627
2628 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2629 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2630
2631 return 0;
2632 }
2633
2634 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2635 {
2636 struct xscale_common *xscale = target_to_xscale(target);
2637 int next_pc_ok = 0;
2638 uint32_t next_pc = 0x0;
2639 struct xscale_trace_data *trace_data = xscale->trace.data;
2640 int retval;
2641
2642 while (trace_data)
2643 {
2644 int i, chkpt;
2645 int rollover;
2646 int branch;
2647 int exception;
2648 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2649
2650 chkpt = 0;
2651 rollover = 0;
2652
2653 for (i = 0; i < trace_data->depth; i++)
2654 {
2655 next_pc_ok = 0;
2656 branch = 0;
2657 exception = 0;
2658
2659 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2660 continue;
2661
2662 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2663 {
2664 case 0: /* Exceptions */
2665 case 1:
2666 case 2:
2667 case 3:
2668 case 4:
2669 case 5:
2670 case 6:
2671 case 7:
2672 exception = (trace_data->entries[i].data & 0x70) >> 4;
2673 next_pc_ok = 1;
2674 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2675 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2676 break;
2677 case 8: /* Direct Branch */
2678 branch = 1;
2679 break;
2680 case 9: /* Indirect Branch */
2681 branch = 1;
2682 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2683 {
2684 next_pc_ok = 1;
2685 }
2686 break;
2687 case 13: /* Checkpointed Indirect Branch */
2688 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2689 {
2690 next_pc_ok = 1;
2691 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2692 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2693 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2694 }
2695 /* explicit fall-through */
2696 case 12: /* Checkpointed Direct Branch */
2697 branch = 1;
2698 if (chkpt == 0)
2699 {
2700 next_pc_ok = 1;
2701 next_pc = trace_data->chkpt0;
2702 chkpt++;
2703 }
2704 else if (chkpt == 1)
2705 {
2706 next_pc_ok = 1;
2707 next_pc = trace_data->chkpt0;
2708 chkpt++;
2709 }
2710 else
2711 {
2712 LOG_WARNING("more than two checkpointed branches encountered");
2713 }
2714 break;
2715 case 15: /* Roll-over */
2716 rollover++;
2717 continue;
2718 default: /* Reserved */
2719 command_print(cmd_ctx, "--- reserved trace message ---");
2720 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2721 return ERROR_OK;
2722 }
2723
2724 if (xscale->trace.pc_ok)
2725 {
2726 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2727 struct arm_instruction instruction;
2728
2729 if ((exception == 6) || (exception == 7))
2730 {
2731 /* IRQ or FIQ exception, no instruction executed */
2732 executed -= 1;
2733 }
2734
2735 while (executed-- >= 0)
2736 {
2737 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2738 {
2739 /* can't continue tracing with no image available */
2740 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2741 {
2742 return retval;
2743 }
2744 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2745 {
2746 /* TODO: handle incomplete images */
2747 }
2748 }
2749
2750 /* a precise abort on a load to the PC is included in the incremental
2751 * word count, other instructions causing data aborts are not included
2752 */
2753 if ((executed == 0) && (exception == 4)
2754 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2755 {
2756 if ((instruction.type == ARM_LDM)
2757 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2758 {
2759 executed--;
2760 }
2761 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2762 && (instruction.info.load_store.Rd != 15))
2763 {
2764 executed--;
2765 }
2766 }
2767
2768 /* only the last instruction executed
2769 * (the one that caused the control flow change)
2770 * could be a taken branch
2771 */
2772 if (((executed == -1) && (branch == 1)) &&
2773 (((instruction.type == ARM_B) ||
2774 (instruction.type == ARM_BL) ||
2775 (instruction.type == ARM_BLX)) &&
2776 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2777 {
2778 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2779 }
2780 else
2781 {
2782 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2783 }
2784 command_print(cmd_ctx, "%s", instruction.text);
2785 }
2786
2787 rollover = 0;
2788 }
2789
2790 if (next_pc_ok)
2791 {
2792 xscale->trace.current_pc = next_pc;
2793 xscale->trace.pc_ok = 1;
2794 }
2795 }
2796
2797 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2798 {
2799 struct arm_instruction instruction;
2800 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2801 {
2802 /* can't continue tracing with no image available */
2803 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2804 {
2805 return retval;
2806 }
2807 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2808 {
2809 /* TODO: handle incomplete images */
2810 }
2811 }
2812 command_print(cmd_ctx, "%s", instruction.text);
2813 }
2814
2815 trace_data = trace_data->next;
2816 }
2817
2818 return ERROR_OK;
2819 }
2820
/* get/set callbacks shared by every entry in the XScale debug-register
 * cache built by xscale_build_reg_cache() */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2825
2826 static void xscale_build_reg_cache(struct target *target)
2827 {
2828 struct xscale_common *xscale = target_to_xscale(target);
2829 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2830 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2831 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2832 int i;
2833 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(struct xscale_reg);
2834
2835 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2836 armv4_5->core_cache = (*cache_p);
2837
2838 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2839 cache_p = &(*cache_p)->next;
2840
2841 /* fill in values for the xscale reg cache */
2842 (*cache_p)->name = "XScale registers";
2843 (*cache_p)->next = NULL;
2844 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2845 (*cache_p)->num_regs = num_regs;
2846
2847 for (i = 0; i < num_regs; i++)
2848 {
2849 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2850 (*cache_p)->reg_list[i].value = calloc(4, 1);
2851 (*cache_p)->reg_list[i].dirty = 0;
2852 (*cache_p)->reg_list[i].valid = 0;
2853 (*cache_p)->reg_list[i].size = 32;
2854 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2855 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2856 arch_info[i] = xscale_reg_arch_info[i];
2857 arch_info[i].target = target;
2858 }
2859
2860 xscale->reg_cache = (*cache_p);
2861 }
2862
/* target_type "init_target" callback: builds the ARM core and XScale
 * debug register caches; the command context is not used here */
static int xscale_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
2869
/* Initialize the XScale-specific target state: magic number, JTAG IR
 * length for known chip variants, debug handler address, reset vectors,
 * comparator bookkeeping, breakpoint opcodes, trace state, and the
 * shared ARMv4/5 and MMU callback structures.
 * Always returns ERROR_OK. */
static int xscale_init_arch_info(struct target *target,
		struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	struct arm *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specfic data (none so far) */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* variant info ... */
	if (variant) {
		int ir_length = 0;

		/* known variants map to an IR length of 5 or 7 bits; mismatched
		 * config files are corrected below */
		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	/* reset vector (index 0) branches into the debug handler */
	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* the other seven vectors branch to themselves (offset 0xfffffe
	 * encodes "B ." once the pipeline offset is accounted for) */
	for (i = 1; i <= 7; i++)
	{
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two IBCR instruction comparators, both free */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	/* two DBR data comparators, both free */
	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* opcodes used for software breakpoints (ARM and Thumb BKPT) */
	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	/* trace support starts out idle with no buffered data or image */
	xscale->trace.capture_status = TRACE_IDLE;
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.buffer_enabled = 0;
	xscale->trace.buffer_fill = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	armv4_5_init_arch_info(target, armv4_5);

	/* hook up the XScale implementations of the generic MMU/cache ops */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
2974
2975 static int xscale_target_create(struct target *target, Jim_Interp *interp)
2976 {
2977 struct xscale_common *xscale;
2978
2979 if (sizeof xscale_debug_handler - 1 > 0x800) {
2980 LOG_ERROR("debug_handler.bin: larger than 2kb");
2981 return ERROR_FAIL;
2982 }
2983
2984 xscale = calloc(1, sizeof(*xscale));
2985 if (!xscale)
2986 return ERROR_FAIL;
2987
2988 return xscale_init_arch_info(target, xscale, target->tap,
2989 target->variant);
2990 }
2991
2992 COMMAND_HANDLER(xscale_handle_debug_handler_command)
2993 {
2994 struct target *target = NULL;
2995 struct xscale_common *xscale;
2996 int retval;
2997 uint32_t handler_address;
2998
2999 if (CMD_ARGC < 2)
3000 {
3001 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3002 return ERROR_OK;
3003 }
3004
3005 if ((target = get_target(CMD_ARGV[0])) == NULL)
3006 {
3007 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3008 return ERROR_FAIL;
3009 }
3010
3011 xscale = target_to_xscale(target);
3012 retval = xscale_verify_pointer(CMD_CTX, xscale);
3013 if (retval != ERROR_OK)
3014 return retval;
3015
3016 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3017
3018 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3019 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3020 {
3021 xscale->handler_address = handler_address;
3022 }
3023 else
3024 {
3025 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3026 return ERROR_FAIL;
3027 }
3028
3029 return ERROR_OK;
3030 }
3031
3032 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3033 {
3034 struct target *target = NULL;
3035 struct xscale_common *xscale;
3036 int retval;
3037 uint32_t cache_clean_address;
3038
3039 if (CMD_ARGC < 2)
3040 {
3041 return ERROR_COMMAND_SYNTAX_ERROR;
3042 }
3043
3044 target = get_target(CMD_ARGV[0]);
3045 if (target == NULL)
3046 {
3047 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3048 return ERROR_FAIL;
3049 }
3050 xscale = target_to_xscale(target);
3051 retval = xscale_verify_pointer(CMD_CTX, xscale);
3052 if (retval != ERROR_OK)
3053 return retval;
3054
3055 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3056
3057 if (cache_clean_address & 0xffff)
3058 {
3059 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3060 }
3061 else
3062 {
3063 xscale->cache_clean_address = cache_clean_address;
3064 }
3065
3066 return ERROR_OK;
3067 }
3068
3069 COMMAND_HANDLER(xscale_handle_cache_info_command)
3070 {
3071 struct target *target = get_current_target(CMD_CTX);
3072 struct xscale_common *xscale = target_to_xscale(target);
3073 int retval;
3074
3075 retval = xscale_verify_pointer(CMD_CTX, xscale);
3076 if (retval != ERROR_OK)
3077 return retval;
3078
3079 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3080 }
3081
/* Translate a virtual address to a physical address through the
 * ARMv4/5 MMU helper.  Fails with ERROR_TARGET_INVALID when the
 * target is not actually an XScale. */
static int xscale_virt2phys(struct target *target,
		uint32_t virtual, uint32_t *physical)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int type;
	uint32_t cb;
	int domain;
	uint32_t ap;

	if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
		LOG_ERROR(xscale_not);
		return ERROR_TARGET_INVALID;
	}

	/* NOTE(review): this code treats the uint32_t return of
	 * armv4_5_mmu_translate_va() as dual-purpose — an error code when it
	 * reports type == -1, otherwise the translated physical address;
	 * confirm against the helper's definition */
	uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
	if (type == -1)
	{
		return ret;
	}
	*physical = ret;
	return ERROR_OK;
}
3104
3105 static int xscale_mmu(struct target *target, int *enabled)
3106 {
3107 struct xscale_common *xscale = target_to_xscale(target);
3108
3109 if (target->state != TARGET_HALTED)
3110 {
3111 LOG_ERROR("Target not halted");
3112 return ERROR_TARGET_INVALID;
3113 }
3114 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3115 return ERROR_OK;
3116 }
3117
3118 COMMAND_HANDLER(xscale_handle_mmu_command)
3119 {
3120 struct target *target = get_current_target(CMD_CTX);
3121 struct xscale_common *xscale = target_to_xscale(target);
3122 int retval;
3123
3124 retval = xscale_verify_pointer(CMD_CTX, xscale);
3125 if (retval != ERROR_OK)
3126 return retval;
3127
3128 if (target->state != TARGET_HALTED)
3129 {
3130 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3131 return ERROR_OK;
3132 }
3133
3134 if (CMD_ARGC >= 1)
3135 {
3136 if (strcmp("enable", CMD_ARGV[0]) == 0)
3137 {
3138 xscale_enable_mmu_caches(target, 1, 0, 0);
3139 xscale->armv4_5_mmu.mmu_enabled = 1;
3140 }
3141 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3142 {
3143 xscale_disable_mmu_caches(target, 1, 0, 0);
3144 xscale->armv4_5_mmu.mmu_enabled = 0;
3145 }
3146 }
3147
3148 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3149
3150 return ERROR_OK;
3151 }
3152
3153 COMMAND_HANDLER(xscale_handle_idcache_command)
3154 {
3155 struct target *target = get_current_target(CMD_CTX);
3156 struct xscale_common *xscale = target_to_xscale(target);
3157 int icache = 0, dcache = 0;
3158 int retval;
3159
3160 retval = xscale_verify_pointer(CMD_CTX, xscale);
3161 if (retval != ERROR_OK)
3162 return retval;
3163
3164 if (target->state != TARGET_HALTED)
3165 {
3166 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3167 return ERROR_OK;
3168 }
3169
3170 if (strcmp(CMD_NAME, "icache") == 0)
3171 icache = 1;
3172 else if (strcmp(CMD_NAME, "dcache") == 0)
3173 dcache = 1;
3174
3175 if (CMD_ARGC >= 1)
3176 {
3177 if (strcmp("enable", CMD_ARGV[0]) == 0)
3178 {
3179 xscale_enable_mmu_caches(target, 0, dcache, icache);
3180
3181 if (icache)
3182 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
3183 else if (dcache)
3184 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
3185 }
3186 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3187 {
3188 xscale_disable_mmu_caches(target, 0, dcache, icache);
3189
3190 if (icache)
3191 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
3192 else if (dcache)
3193 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
3194 }
3195 }
3196
3197 if (icache)
3198 command_print(CMD_CTX, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
3199
3200 if (dcache)
3201 command_print(CMD_CTX, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
3202
3203 return ERROR_OK;
3204 }
3205
3206 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3207 {
3208 struct target *target = get_current_target(CMD_CTX);
3209 struct xscale_common *xscale = target_to_xscale(target);
3210 int retval;
3211
3212 retval = xscale_verify_pointer(CMD_CTX, xscale);
3213 if (retval != ERROR_OK)
3214 return retval;
3215
3216 if (CMD_ARGC < 1)
3217 {
3218 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3219 }
3220 else
3221 {
3222 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3223 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3224 xscale_write_dcsr(target, -1, -1);
3225 }
3226
3227 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3228
3229 return ERROR_OK;
3230 }
3231
3232
3233 COMMAND_HANDLER(xscale_handle_vector_table_command)
3234 {
3235 struct target *target = get_current_target(CMD_CTX);
3236 struct xscale_common *xscale = target_to_xscale(target);
3237 int err = 0;
3238 int retval;
3239
3240 retval = xscale_verify_pointer(CMD_CTX, xscale);
3241 if (retval != ERROR_OK)
3242 return retval;
3243
3244 if (CMD_ARGC == 0) /* print current settings */
3245 {
3246 int idx;
3247
3248 command_print(CMD_CTX, "active user-set static vectors:");
3249 for (idx = 1; idx < 8; idx++)
3250 if (xscale->static_low_vectors_set & (1 << idx))
3251 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3252 for (idx = 1; idx < 8; idx++)
3253 if (xscale->static_high_vectors_set & (1 << idx))
3254 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3255 return ERROR_OK;
3256 }
3257
3258 if (CMD_ARGC != 3)
3259 err = 1;
3260 else
3261 {
3262 int idx;
3263 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3264 uint32_t vec;
3265 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3266
3267 if (idx < 1 || idx >= 8)
3268 err = 1;
3269
3270 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3271 {
3272 xscale->static_low_vectors_set |= (1<<idx);
3273 xscale->static_low_vectors[idx] = vec;
3274 }
3275 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3276 {
3277 xscale->static_high_vectors_set |= (1<<idx);
3278 xscale->static_high_vectors[idx] = vec;
3279 }
3280 else
3281 err = 1;
3282 }
3283
3284 if (err)
3285 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3286
3287 return ERROR_OK;
3288 }
3289
3290
3291 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3292 {
3293 struct target *target = get_current_target(CMD_CTX);
3294 struct xscale_common *xscale = target_to_xscale(target);
3295 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
3296 uint32_t dcsr_value;
3297 int retval;
3298
3299 retval = xscale_verify_pointer(CMD_CTX, xscale);
3300 if (retval != ERROR_OK)
3301 return retval;
3302
3303 if (target->state != TARGET_HALTED)
3304 {
3305 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3306 return ERROR_OK;
3307 }
3308
3309 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3310 {
3311 struct xscale_trace_data *td, *next_td;
3312 xscale->trace.buffer_enabled = 1;
3313
3314 /* free old trace data */
3315 td = xscale->trace.data;
3316 while (td)
3317 {
3318 next_td = td->next;
3319
3320 if (td->entries)
3321 free(td->entries);
3322 free(td);
3323 td = next_td;
3324 }
3325 xscale->trace.data = NULL;
3326 }
3327 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3328 {
3329 xscale->trace.buffer_enabled = 0;
3330 }
3331
3332 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3333 {
3334 uint32_t fill = 1;
3335 if (CMD_ARGC >= 3)
3336 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3337 xscale->trace.buffer_fill = fill;
3338 }
3339 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3340 {
3341 xscale->trace.buffer_fill = -1;
3342 }
3343
3344 if (xscale->trace.buffer_enabled)
3345 {
3346 /* if we enable the trace buffer in fill-once
3347 * mode we know the address of the first instruction */
3348 xscale->trace.pc_ok = 1;
3349 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3350 }
3351 else
3352 {
3353 /* otherwise the address is unknown, and we have no known good PC */
3354 xscale->trace.pc_ok = 0;
3355 }
3356
3357 command_print(CMD_CTX, "trace buffer %s (%s)",
3358 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3359 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3360
3361 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3362 if (xscale->trace.buffer_fill >= 0)
3363 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3364 else
3365 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3366
3367 return ERROR_OK;
3368 }
3369
3370 COMMAND_HANDLER(xscale_handle_trace_image_command)
3371 {
3372 struct target *target = get_current_target(CMD_CTX);
3373 struct xscale_common *xscale = target_to_xscale(target);
3374 int retval;
3375
3376 if (CMD_ARGC < 1)
3377 {
3378 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3379 return ERROR_OK;
3380 }
3381
3382 retval = xscale_verify_pointer(CMD_CTX, xscale);
3383 if (retval != ERROR_OK)
3384 return retval;
3385
3386 if (xscale->trace.image)
3387 {
3388 image_close(xscale->trace.image);
3389 free(xscale->trace.image);
3390 command_print(CMD_CTX, "previously loaded image found and closed");
3391 }
3392
3393 xscale->trace.image = malloc(sizeof(struct image));
3394 xscale->trace.image->base_address_set = 0;
3395 xscale->trace.image->start_address_set = 0;
3396
3397 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3398 if (CMD_ARGC >= 2)
3399 {
3400 xscale->trace.image->base_address_set = 1;
3401 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3402 }
3403 else
3404 {
3405 xscale->trace.image->base_address_set = 0;
3406 }
3407
3408 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3409 {
3410 free(xscale->trace.image);
3411 xscale->trace.image = NULL;
3412 return ERROR_OK;
3413 }
3414
3415 return ERROR_OK;
3416 }
3417
3418 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3419 {
3420 struct target *target = get_current_target(CMD_CTX);
3421 struct xscale_common *xscale = target_to_xscale(target);
3422 struct xscale_trace_data *trace_data;
3423 struct fileio file;
3424 int retval;
3425
3426 retval = xscale_verify_pointer(CMD_CTX, xscale);
3427 if (retval != ERROR_OK)
3428 return retval;
3429
3430 if (target->state != TARGET_HALTED)
3431 {
3432 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3433 return ERROR_OK;
3434 }
3435
3436 if (CMD_ARGC < 1)
3437 {
3438 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3439 return ERROR_OK;
3440 }
3441
3442 trace_data = xscale->trace.data;
3443
3444 if (!trace_data)
3445 {
3446 command_print(CMD_CTX, "no trace data collected");
3447 return ERROR_OK;
3448 }
3449
3450 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3451 {
3452 return ERROR_OK;
3453 }
3454
3455 while (trace_data)
3456 {
3457 int i;
3458
3459 fileio_write_u32(&file, trace_data->chkpt0);
3460 fileio_write_u32(&file, trace_data->chkpt1);
3461 fileio_write_u32(&file, trace_data->last_instruction);
3462 fileio_write_u32(&file, trace_data->depth);
3463
3464 for (i = 0; i < trace_data->depth; i++)
3465 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3466
3467 trace_data = trace_data->next;
3468 }
3469
3470 fileio_close(&file);
3471
3472 return ERROR_OK;
3473 }
3474
3475 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3476 {
3477 struct target *target = get_current_target(CMD_CTX);
3478 struct xscale_common *xscale = target_to_xscale(target);
3479 int retval;
3480
3481 retval = xscale_verify_pointer(CMD_CTX, xscale);
3482 if (retval != ERROR_OK)
3483 return retval;
3484
3485 xscale_analyze_trace(target, CMD_CTX);
3486
3487 return ERROR_OK;
3488 }
3489
3490 COMMAND_HANDLER(xscale_handle_cp15)
3491 {
3492 struct target *target = get_current_target(CMD_CTX);
3493 struct xscale_common *xscale = target_to_xscale(target);
3494 int retval;
3495
3496 retval = xscale_verify_pointer(CMD_CTX, xscale);
3497 if (retval != ERROR_OK)
3498 return retval;
3499
3500 if (target->state != TARGET_HALTED)
3501 {
3502 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3503 return ERROR_OK;
3504 }
3505 uint32_t reg_no = 0;
3506 struct reg *reg = NULL;
3507 if (CMD_ARGC > 0)
3508 {
3509 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3510 /*translate from xscale cp15 register no to openocd register*/
3511 switch (reg_no)
3512 {
3513 case 0:
3514 reg_no = XSCALE_MAINID;
3515 break;
3516 case 1:
3517 reg_no = XSCALE_CTRL;
3518 break;
3519 case 2:
3520 reg_no = XSCALE_TTB;
3521 break;
3522 case 3:
3523 reg_no = XSCALE_DAC;
3524 break;
3525 case 5:
3526 reg_no = XSCALE_FSR;
3527 break;
3528 case 6:
3529 reg_no = XSCALE_FAR;
3530 break;
3531 case 13:
3532 reg_no = XSCALE_PID;
3533 break;
3534 case 15:
3535 reg_no = XSCALE_CPACCESS;
3536 break;
3537 default:
3538 command_print(CMD_CTX, "invalid register number");
3539 return ERROR_INVALID_ARGUMENTS;
3540 }
3541 reg = &xscale->reg_cache->reg_list[reg_no];
3542
3543 }
3544 if (CMD_ARGC == 1)
3545 {
3546 uint32_t value;
3547
3548 /* read cp15 control register */
3549 xscale_get_reg(reg);
3550 value = buf_get_u32(reg->value, 0, 32);
3551 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3552 }
3553 else if (CMD_ARGC == 2)
3554 {
3555 uint32_t value;
3556 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3557
3558 /* send CP write request (command 0x41) */
3559 xscale_send_u32(target, 0x41);
3560
3561 /* send CP register number */
3562 xscale_send_u32(target, reg_no);
3563
3564 /* send CP register value */
3565 xscale_send_u32(target, value);
3566
3567 /* execute cpwait to ensure outstanding operations complete */
3568 xscale_send_u32(target, 0x53);
3569 }
3570 else
3571 {
3572 command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
3573 }
3574
3575 return ERROR_OK;
3576 }
3577
3578 static int xscale_register_commands(struct command_context *cmd_ctx)
3579 {
3580 struct command *xscale_cmd;
3581
3582 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3583
3584 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
3585 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3586
3587 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3588 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3589 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3590 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3591
3592 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
3593 register_command(cmd_ctx, xscale_cmd, "vector_table", xscale_handle_vector_table_command, COMMAND_EXEC, "<high|low> <index> <code> set static code for exception handler entry");
3594
3595 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable | disable> ['fill' [n]|'wrap']");
3596
3597 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3598 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3599 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3600 COMMAND_EXEC, "load image from <file> [base address]");
3601
3602 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3603
3604 armv4_5_register_commands(cmd_ctx);
3605
3606 return ERROR_OK;
3607 }
3608
/* OpenOCD target driver vtable for Intel XScale cores. */
struct target_type xscale_target =
{
	.name = "xscale",

	/* state polling and reporting */
	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	/* run control */
	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	/* reset handling; no soft-reset-halt support */
	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	.soft_reset_halt = NULL,

	/* GDB register access uses the generic ARMv4/5 layout */
	.get_gdb_reg_list = armv4_5_get_gdb_reg_list,

	/* memory access */
	.read_memory = xscale_read_memory,
	.write_memory = xscale_write_memory,
	.bulk_write_memory = xscale_bulk_write_memory,

	/* checksum/blank-check via generic ARM target algorithms */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* breakpoints and watchpoints */
	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	/* lifecycle and command registration */
	.register_commands = xscale_register_commands,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	/* address translation */
	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)