target: create/use register_cache_invalidate()
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include "time_support.h"
37 #include "register.h"
38 #include "image.h"
39
40
41 /*
42 * Important XScale documents available as of October 2009 include:
43 *
44 * Intel XScale® Core Developer’s Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
48 *
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
56 *
57 * Chip-specific microarchitecture documents may also be useful.
58 */
59
60
61 /* forward declarations */
62 static int xscale_resume(struct target *, int current,
63 uint32_t address, int handle_breakpoints, int debug_execution);
64 static int xscale_debug_entry(struct target *);
65 static int xscale_restore_context(struct target *);
66 static int xscale_get_reg(struct reg *reg);
67 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
68 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
69 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
70 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_read_trace(struct target *);
72
73
74 /* This XScale "debug handler" is loaded into the processor's
75 * mini-ICache, which is 2K of code writable only via JTAG.
76 *
77 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
78 * binary files cleanly. It's string oriented, and terminates them
79 * with a NUL character. Better would be to generate the constants
80 * and let other code decide names, scoping, and other housekeeping.
81 */
82 static /* unsigned const char xscale_debug_handler[] = ... */
83 #include "xscale_debug.h"
84
85 static char *const xscale_reg_list[] =
86 {
87 "XSCALE_MAINID", /* 0 */
88 "XSCALE_CACHETYPE",
89 "XSCALE_CTRL",
90 "XSCALE_AUXCTRL",
91 "XSCALE_TTB",
92 "XSCALE_DAC",
93 "XSCALE_FSR",
94 "XSCALE_FAR",
95 "XSCALE_PID",
96 "XSCALE_CPACCESS",
97 "XSCALE_IBCR0", /* 10 */
98 "XSCALE_IBCR1",
99 "XSCALE_DBR0",
100 "XSCALE_DBR1",
101 "XSCALE_DBCON",
102 "XSCALE_TBREG",
103 "XSCALE_CHKPT0",
104 "XSCALE_CHKPT1",
105 "XSCALE_DCSR",
106 "XSCALE_TX",
107 "XSCALE_RX", /* 20 */
108 "XSCALE_TXRXCTRL",
109 };
110
111 static const struct xscale_reg xscale_reg_arch_info[] =
112 {
113 {XSCALE_MAINID, NULL},
114 {XSCALE_CACHETYPE, NULL},
115 {XSCALE_CTRL, NULL},
116 {XSCALE_AUXCTRL, NULL},
117 {XSCALE_TTB, NULL},
118 {XSCALE_DAC, NULL},
119 {XSCALE_FSR, NULL},
120 {XSCALE_FAR, NULL},
121 {XSCALE_PID, NULL},
122 {XSCALE_CPACCESS, NULL},
123 {XSCALE_IBCR0, NULL},
124 {XSCALE_IBCR1, NULL},
125 {XSCALE_DBR0, NULL},
126 {XSCALE_DBR1, NULL},
127 {XSCALE_DBCON, NULL},
128 {XSCALE_TBREG, NULL},
129 {XSCALE_CHKPT0, NULL},
130 {XSCALE_CHKPT1, NULL},
131 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
132 {-1, NULL}, /* TX accessed via JTAG */
133 {-1, NULL}, /* RX accessed via JTAG */
134 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
135 };
136
137 /* convenience wrapper to access XScale specific registers */
138 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
139 {
140 uint8_t buf[4];
141
142 buf_set_u32(buf, 0, 32, value);
143
144 return xscale_set_reg(reg, buf);
145 }
146
147 static const char xscale_not[] = "target is not an XScale";
148
149 static int xscale_verify_pointer(struct command_context *cmd_ctx,
150 struct xscale_common *xscale)
151 {
152 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
153 command_print(cmd_ctx, xscale_not);
154 return ERROR_TARGET_INVALID;
155 }
156 return ERROR_OK;
157 }
158
159 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
160 {
161 if (tap == NULL)
162 return ERROR_FAIL;
163
164 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
165 {
166 struct scan_field field;
167 uint8_t scratch[4];
168
169 memset(&field, 0, sizeof field);
170 field.tap = tap;
171 field.num_bits = tap->ir_length;
172 field.out_value = scratch;
173 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
174
175 jtag_add_ir_scan(1, &field, jtag_get_end_state());
176 }
177
178 return ERROR_OK;
179 }
180
181 static int xscale_read_dcsr(struct target *target)
182 {
183 struct xscale_common *xscale = target_to_xscale(target);
184 int retval;
185 struct scan_field fields[3];
186 uint8_t field0 = 0x0;
187 uint8_t field0_check_value = 0x2;
188 uint8_t field0_check_mask = 0x7;
189 uint8_t field2 = 0x0;
190 uint8_t field2_check_value = 0x0;
191 uint8_t field2_check_mask = 0x1;
192
193 jtag_set_end_state(TAP_DRPAUSE);
194 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
195
196 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
197 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
198
199 memset(&fields, 0, sizeof fields);
200
201 fields[0].tap = target->tap;
202 fields[0].num_bits = 3;
203 fields[0].out_value = &field0;
204 uint8_t tmp;
205 fields[0].in_value = &tmp;
206
207 fields[1].tap = target->tap;
208 fields[1].num_bits = 32;
209 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
210
211 fields[2].tap = target->tap;
212 fields[2].num_bits = 1;
213 fields[2].out_value = &field2;
214 uint8_t tmp2;
215 fields[2].in_value = &tmp2;
216
217 jtag_add_dr_scan(3, fields, jtag_get_end_state());
218
219 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
220 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
221
222 if ((retval = jtag_execute_queue()) != ERROR_OK)
223 {
224 LOG_ERROR("JTAG error while reading DCSR");
225 return retval;
226 }
227
228 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
229 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
230
231 /* write the register with the value we just read
232 * on this second pass, only the first bit of field0 is guaranteed to be 0
233 */
234 field0_check_mask = 0x1;
235 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
236 fields[1].in_value = NULL;
237
238 jtag_set_end_state(TAP_IDLE);
239
240 jtag_add_dr_scan(3, fields, jtag_get_end_state());
241
242 /* DANGER!!! this must be here. It will make sure that the arguments
243 * to jtag_set_check_value() do not go out of scope! */
244 return jtag_execute_queue();
245 }
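/* Note: both xscale_read_dcsr() above and xscale_write_dcsr() below scan the
 * SELDCSR data register as a 3-bit control field (bit 1 carries hold_rst and
 * bit 2 carries external_debug_break, as set via buf_set_u32), the 32-bit DCSR
 * value, and one trailing bit. This summary is taken from the scans in this
 * file, not from an external register description.
 */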
246
247
248 static void xscale_getbuf(jtag_callback_data_t arg)
249 {
250 uint8_t *in = (uint8_t *)arg;
251 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
252 }
253
254 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
255 {
256 if (num_words == 0)
257 return ERROR_INVALID_ARGUMENTS;
258
259 int retval = ERROR_OK;
260 tap_state_t path[3];
261 struct scan_field fields[3];
262 uint8_t *field0 = malloc(num_words * 1);
263 uint8_t field0_check_value = 0x2;
264 uint8_t field0_check_mask = 0x6;
265 uint32_t *field1 = malloc(num_words * 4);
266 uint8_t field2_check_value = 0x0;
267 uint8_t field2_check_mask = 0x1;
268 int words_done = 0;
269 int words_scheduled = 0;
270 int i;
271
272 path[0] = TAP_DRSELECT;
273 path[1] = TAP_DRCAPTURE;
274 path[2] = TAP_DRSHIFT;
275
276 memset(&fields, 0, sizeof fields);
277
278 fields[0].tap = target->tap;
279 fields[0].num_bits = 3;
280 fields[0].check_value = &field0_check_value;
281 fields[0].check_mask = &field0_check_mask;
282
283 fields[1].tap = target->tap;
284 fields[1].num_bits = 32;
285
286 fields[2].tap = target->tap;
287 fields[2].num_bits = 1;
288 fields[2].check_value = &field2_check_value;
289 fields[2].check_mask = &field2_check_mask;
290
291 jtag_set_end_state(TAP_IDLE);
292 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
293 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
294
295 /* repeat until all words have been collected */
296 int attempts = 0;
297 while (words_done < num_words)
298 {
299 /* schedule reads */
300 words_scheduled = 0;
301 for (i = words_done; i < num_words; i++)
302 {
303 fields[0].in_value = &field0[i];
304
305 jtag_add_pathmove(3, path);
306
307 fields[1].in_value = (uint8_t *)(field1 + i);
308
309 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
310
311 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
312
313 words_scheduled++;
314 }
315
316 if ((retval = jtag_execute_queue()) != ERROR_OK)
317 {
318 LOG_ERROR("JTAG error while receiving data from debug handler");
319 break;
320 }
321
322 /* examine results */
323 for (i = words_done; i < num_words; i++)
324 {
325 if (!(field0[i] & 1))
326 {
327 /* move backwards if necessary */
328 int j;
329 for (j = i; j < num_words - 1; j++)
330 {
331 field0[j] = field0[j + 1];
332 field1[j] = field1[j + 1];
333 }
334 words_scheduled--;
335 }
336 }
337 if (words_scheduled == 0)
338 {
339 if (attempts++ == 1000)
340 {
341 LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
342 retval = ERROR_TARGET_TIMEOUT;
343 break;
344 }
345 }
346
347 words_done += words_scheduled;
348 }
349
350 for (i = 0; i < num_words; i++)
351 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
352
353 free(field1);
free(field0);
354
355 return retval;
356 }
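/* Note on the DBGTX scans used by xscale_receive() above and xscale_read_tx()
 * below: each DR scan shifts a 3-bit status field, the 32-bit TX payload and a
 * single trailing bit. Bit 0 of the status field indicates whether TX held
 * valid data, and bits [2:1] are compared against b01 (check_value 0x2,
 * check_mask 0x6). This reading of the fields is inferred from the checks in
 * this file rather than quoted from the Intel manual.
 */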
357
358 static int xscale_read_tx(struct target *target, int consume)
359 {
360 struct xscale_common *xscale = target_to_xscale(target);
361 tap_state_t path[3];
362 tap_state_t noconsume_path[6];
363 int retval;
364 struct timeval timeout, now;
365 struct scan_field fields[3];
366 uint8_t field0_in = 0x0;
367 uint8_t field0_check_value = 0x2;
368 uint8_t field0_check_mask = 0x6;
369 uint8_t field2_check_value = 0x0;
370 uint8_t field2_check_mask = 0x1;
371
372 jtag_set_end_state(TAP_IDLE);
373
374 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
375
376 path[0] = TAP_DRSELECT;
377 path[1] = TAP_DRCAPTURE;
378 path[2] = TAP_DRSHIFT;
379
380 noconsume_path[0] = TAP_DRSELECT;
381 noconsume_path[1] = TAP_DRCAPTURE;
382 noconsume_path[2] = TAP_DREXIT1;
383 noconsume_path[3] = TAP_DRPAUSE;
384 noconsume_path[4] = TAP_DREXIT2;
385 noconsume_path[5] = TAP_DRSHIFT;
386
387 memset(&fields, 0, sizeof fields);
388
389 fields[0].tap = target->tap;
390 fields[0].num_bits = 3;
391 fields[0].in_value = &field0_in;
392
393 fields[1].tap = target->tap;
394 fields[1].num_bits = 32;
395 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
396
397 fields[2].tap = target->tap;
398 fields[2].num_bits = 1;
399 uint8_t tmp;
400 fields[2].in_value = &tmp;
401
402 gettimeofday(&timeout, NULL);
403 timeval_add_time(&timeout, 1, 0);
404
405 for (;;)
406 {
407 /* if we want to consume the register content (i.e. clear TX_READY),
408 * we have to go straight from Capture-DR to Shift-DR;
409 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
410 */
411 if (consume)
412 jtag_add_pathmove(3, path);
413 else
414 {
415 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
416 }
417
418 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
419
420 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
421 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
422
423 if ((retval = jtag_execute_queue()) != ERROR_OK)
424 {
425 LOG_ERROR("JTAG error while reading TX");
426 return ERROR_TARGET_TIMEOUT;
427 }
428
429 gettimeofday(&now, NULL);
430 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
431 {
432 LOG_ERROR("time out reading TX register");
433 return ERROR_TARGET_TIMEOUT;
434 }
435 if (!((!(field0_in & 1)) && consume))
436 {
437 goto done;
438 }
439 if (debug_level >= 3)
440 {
441 LOG_DEBUG("waiting 100ms");
442 alive_sleep(100); /* avoid flooding the logs */
443 } else
444 {
445 keep_alive();
446 }
447 }
448 done:
449
450 if (!(field0_in & 1))
451 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
452
453 return ERROR_OK;
454 }
455
456 static int xscale_write_rx(struct target *target)
457 {
458 struct xscale_common *xscale = target_to_xscale(target);
459 int retval;
460 struct timeval timeout, now;
461 struct scan_field fields[3];
462 uint8_t field0_out = 0x0;
463 uint8_t field0_in = 0x0;
464 uint8_t field0_check_value = 0x2;
465 uint8_t field0_check_mask = 0x6;
466 uint8_t field2 = 0x0;
467 uint8_t field2_check_value = 0x0;
468 uint8_t field2_check_mask = 0x1;
469
470 jtag_set_end_state(TAP_IDLE);
471
472 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
473
474 memset(&fields, 0, sizeof fields);
475
476 fields[0].tap = target->tap;
477 fields[0].num_bits = 3;
478 fields[0].out_value = &field0_out;
479 fields[0].in_value = &field0_in;
480
481 fields[1].tap = target->tap;
482 fields[1].num_bits = 32;
483 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
484
485 fields[2].tap = target->tap;
486 fields[2].num_bits = 1;
487 fields[2].out_value = &field2;
488 uint8_t tmp;
489 fields[2].in_value = &tmp;
490
491 gettimeofday(&timeout, NULL);
492 timeval_add_time(&timeout, 1, 0);
493
494 /* poll until rx_read is low */
495 LOG_DEBUG("polling RX");
496 for (;;)
497 {
498 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
499
500 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
501 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
502
503 if ((retval = jtag_execute_queue()) != ERROR_OK)
504 {
505 LOG_ERROR("JTAG error while writing RX");
506 return retval;
507 }
508
509 gettimeofday(&now, NULL);
510 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
511 {
512 LOG_ERROR("time out writing RX register");
513 return ERROR_TARGET_TIMEOUT;
514 }
515 if (!(field0_in & 1))
516 goto done;
517 if (debug_level >= 3)
518 {
519 LOG_DEBUG("waiting 100ms");
520 alive_sleep(100); /* avoid flooding the logs */
521 } else
522 {
523 keep_alive();
524 }
525 }
526 done:
527
528 /* set rx_valid */
529 field2 = 0x1;
530 jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
531
532 if ((retval = jtag_execute_queue()) != ERROR_OK)
533 {
534 LOG_ERROR("JTAG error while writing RX");
535 return retval;
536 }
537
538 return ERROR_OK;
539 }
540
541 /* send count elements of size bytes each to the debug handler */
542 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
543 {
544 uint32_t t[3];
545 int bits[3];
546 int retval;
547 int done_count = 0;
548
549 jtag_set_end_state(TAP_IDLE);
550
551 xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
552
553 bits[0]=3;
554 t[0]=0;
555 bits[1]=32;
556 t[2]=1;
557 bits[2]=1;
558 int endianness = target->endianness;
559 while (done_count++ < count)
560 {
561 switch (size)
562 {
563 case 4:
564 if (endianness == TARGET_LITTLE_ENDIAN)
565 {
566 t[1]=le_to_h_u32(buffer);
567 } else
568 {
569 t[1]=be_to_h_u32(buffer);
570 }
571 break;
572 case 2:
573 if (endianness == TARGET_LITTLE_ENDIAN)
574 {
575 t[1]=le_to_h_u16(buffer);
576 } else
577 {
578 t[1]=be_to_h_u16(buffer);
579 }
580 break;
581 case 1:
582 t[1]=buffer[0];
583 break;
584 default:
585 LOG_ERROR("BUG: size neither 4, 2 nor 1");
586 return ERROR_INVALID_ARGUMENTS;
587 }
588 jtag_add_dr_out(target->tap,
589 3,
590 bits,
591 t,
592 jtag_set_end_state(TAP_IDLE));
593 buffer += size;
594 }
595
596 if ((retval = jtag_execute_queue()) != ERROR_OK)
597 {
598 LOG_ERROR("JTAG error while sending data to debug handler");
599 return retval;
600 }
601
602 return ERROR_OK;
603 }
604
605 static int xscale_send_u32(struct target *target, uint32_t value)
606 {
607 struct xscale_common *xscale = target_to_xscale(target);
608
609 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
610 return xscale_write_rx(target);
611 }
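/* Commands sent to the debug handler via xscale_send_u32() in this file:
 * 0x0 requests the banked registers, 0x1 writes them back, 0x10|size starts a
 * memory read and 0x20|size a memory write, 0x30 resumes, 0x31 resumes with
 * tracing (preceded by 0x62 to clean the trace buffer), and 0x60 clears the
 * DCSR Sticky Abort bit. Summary compiled from the call sites in this file,
 * not from a separate command reference.
 */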
612
613 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
614 {
615 struct xscale_common *xscale = target_to_xscale(target);
616 int retval;
617 struct scan_field fields[3];
618 uint8_t field0 = 0x0;
619 uint8_t field0_check_value = 0x2;
620 uint8_t field0_check_mask = 0x7;
621 uint8_t field2 = 0x0;
622 uint8_t field2_check_value = 0x0;
623 uint8_t field2_check_mask = 0x1;
624
625 if (hold_rst != -1)
626 xscale->hold_rst = hold_rst;
627
628 if (ext_dbg_brk != -1)
629 xscale->external_debug_break = ext_dbg_brk;
630
631 jtag_set_end_state(TAP_IDLE);
632 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
633
634 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
635 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
636
637 memset(&fields, 0, sizeof fields);
638
639 fields[0].tap = target->tap;
640 fields[0].num_bits = 3;
641 fields[0].out_value = &field0;
642 uint8_t tmp;
643 fields[0].in_value = &tmp;
644
645 fields[1].tap = target->tap;
646 fields[1].num_bits = 32;
647 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
648
649 fields[2].tap = target->tap;
650 fields[2].num_bits = 1;
651 fields[2].out_value = &field2;
652 uint8_t tmp2;
653 fields[2].in_value = &tmp2;
654
655 jtag_add_dr_scan(3, fields, jtag_get_end_state());
656
657 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
658 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
659
660 if ((retval = jtag_execute_queue()) != ERROR_OK)
661 {
662 LOG_ERROR("JTAG error while writing DCSR");
663 return retval;
664 }
665
666 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
667 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
668
669 return ERROR_OK;
670 }
671
672 /* compute the parity of a 32-bit word: returns 0 if the number of set bits is even, 1 if odd */
673 static unsigned int parity (unsigned int v)
674 {
675 // unsigned int ov = v;
676 v ^= v >> 16;
677 v ^= v >> 8;
678 v ^= v >> 4;
679 v &= 0xf;
680 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
681 return (0x6996 >> v) & 1;
682 }
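/* Example: parity(0x00000003) folds to the nibble 0x3, and (0x6996 >> 3) & 1
 * is 0, since two bits are set (even parity). The constant 0x6996 acts as a
 * 16-entry lookup table: bit n holds the parity of the 4-bit value n.
 */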
683
684 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
685 {
686 uint8_t packet[4];
687 uint8_t cmd;
688 int word;
689 struct scan_field fields[2];
690
691 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
692
693 /* LDIC into IR */
694 jtag_set_end_state(TAP_IDLE);
695 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
696
697 /* CMD is b011 to load a cacheline into the Mini ICache.
698 * Loading into the main ICache is deprecated, and unused.
699 * It's followed by three zero bits, and 27 address bits.
700 */
701 buf_set_u32(&cmd, 0, 6, 0x3);
702
703 /* virtual address of desired cache line */
704 buf_set_u32(packet, 0, 27, va >> 5);
705
706 memset(&fields, 0, sizeof fields);
707
708 fields[0].tap = target->tap;
709 fields[0].num_bits = 6;
710 fields[0].out_value = &cmd;
711
712 fields[1].tap = target->tap;
713 fields[1].num_bits = 27;
714 fields[1].out_value = packet;
715
716 jtag_add_dr_scan(2, fields, jtag_get_end_state());
717
718 /* rest of packet is a cacheline: 8 instructions, with parity */
719 fields[0].num_bits = 32;
720 fields[0].out_value = packet;
721
722 fields[1].num_bits = 1;
723 fields[1].out_value = &cmd;
724
725 for (word = 0; word < 8; word++)
726 {
727 buf_set_u32(packet, 0, 32, buffer[word]);
728
729 uint32_t value;
730 memcpy(&value, packet, sizeof(uint32_t));
731 cmd = parity(value);
732
733 jtag_add_dr_scan(2, fields, jtag_get_end_state());
734 }
735
736 return jtag_execute_queue();
737 }
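/* The LDIC scans above first shift a 6-bit command plus the 27-bit virtual
 * address of the cache line (VA[31:5]), then eight data scans of one 32-bit
 * instruction each plus its parity bit, i.e. one full 32-byte mini-ICache line.
 */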
738
739 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
740 {
741 uint8_t packet[4];
742 uint8_t cmd;
743 struct scan_field fields[2];
744
745 jtag_set_end_state(TAP_IDLE);
746 xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
747
748 /* CMD is b000 to invalidate an IC line; bits [6:4] are b000 as well */
749 buf_set_u32(&cmd, 0, 6, 0x0);
750
751 /* virtual address of desired cache line */
752 buf_set_u32(packet, 0, 27, va >> 5);
753
754 memset(&fields, 0, sizeof fields);
755
756 fields[0].tap = target->tap;
757 fields[0].num_bits = 6;
758 fields[0].out_value = &cmd;
759
760 fields[1].tap = target->tap;
761 fields[1].num_bits = 27;
762 fields[1].out_value = packet;
763
764 jtag_add_dr_scan(2, fields, jtag_get_end_state());
765
766 return ERROR_OK;
767 }
768
769 static int xscale_update_vectors(struct target *target)
770 {
771 struct xscale_common *xscale = target_to_xscale(target);
772 int i;
773 int retval;
774
775 uint32_t low_reset_branch, high_reset_branch;
776
777 for (i = 1; i < 8; i++)
778 {
779 /* if there's a static vector specified for this exception, override */
780 if (xscale->static_high_vectors_set & (1 << i))
781 {
782 xscale->high_vectors[i] = xscale->static_high_vectors[i];
783 }
784 else
785 {
786 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
787 if (retval == ERROR_TARGET_TIMEOUT)
788 return retval;
789 if (retval != ERROR_OK)
790 {
791 /* Some of these reads will fail as part of normal execution */
792 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
793 }
794 }
795 }
796
797 for (i = 1; i < 8; i++)
798 {
799 if (xscale->static_low_vectors_set & (1 << i))
800 {
801 xscale->low_vectors[i] = xscale->static_low_vectors[i];
802 }
803 else
804 {
805 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
806 if (retval == ERROR_TARGET_TIMEOUT)
807 return retval;
808 if (retval != ERROR_OK)
809 {
810 /* Some of these reads will fail as part of normal execution */
811 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
812 }
813 }
814 }
815
816 /* calculate branches to debug handler */
817 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
818 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
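/* ARM B-instruction offset encoding: the offset field holds
 * (branch target - vector address - 8) >> 2, because the PC reads two
 * instructions (8 bytes) ahead of the branch and the 24-bit field counts
 * words rather than bytes; the & 0xffffff below truncates it to 24 bits.
 */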
819
820 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
821 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
822
823 /* invalidate and load exception vectors in mini i-cache */
824 xscale_invalidate_ic_line(target, 0x0);
825 xscale_invalidate_ic_line(target, 0xffff0000);
826
827 xscale_load_ic(target, 0x0, xscale->low_vectors);
828 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
829
830 return ERROR_OK;
831 }
832
833 static int xscale_arch_state(struct target *target)
834 {
835 struct xscale_common *xscale = target_to_xscale(target);
836 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
837
838 static const char *state[] =
839 {
840 "disabled", "enabled"
841 };
842
843 static const char *arch_dbg_reason[] =
844 {
845 "", "\n(processor reset)", "\n(trace buffer full)"
846 };
847
848 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
849 {
850 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
851 return ERROR_INVALID_ARGUMENTS;
852 }
853
854 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
855 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
856 "MMU: %s, D-Cache: %s, I-Cache: %s"
857 "%s",
858 armv4_5_state_strings[armv4_5->core_state],
859 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
860 arm_mode_name(armv4_5->core_mode),
861 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
862 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
863 state[xscale->armv4_5_mmu.mmu_enabled],
864 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
865 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
866 arch_dbg_reason[xscale->arch_debug_reason]);
867
868 return ERROR_OK;
869 }
870
871 static int xscale_poll(struct target *target)
872 {
873 int retval = ERROR_OK;
874
875 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
876 {
877 enum target_state previous_state = target->state;
878 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
879 {
880
881 /* there's data to read from the tx register, we entered debug state */
882 target->state = TARGET_HALTED;
883
884 /* process debug entry, fetching current mode regs */
885 retval = xscale_debug_entry(target);
886 }
887 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
888 {
889 LOG_USER("error while polling TX register, reset CPU");
890 /* here we "lie" so GDB won't get stuck and a reset can be performed */
891 target->state = TARGET_HALTED;
892 }
893
894 /* debug_entry could have overwritten target state (i.e. immediate resume)
895 * don't signal event handlers in that case
896 */
897 if (target->state != TARGET_HALTED)
898 return ERROR_OK;
899
900 /* if target was running, signal that we halted
901 * otherwise we reentered from debug execution */
902 if (previous_state == TARGET_RUNNING)
903 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
904 else
905 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
906 }
907
908 return retval;
909 }
910
911 static int xscale_debug_entry(struct target *target)
912 {
913 struct xscale_common *xscale = target_to_xscale(target);
914 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
915 uint32_t pc;
916 uint32_t buffer[10];
917 int i;
918 int retval;
919 uint32_t moe;
920
921 /* clear external dbg break (will be written on next DCSR read) */
922 xscale->external_debug_break = 0;
923 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
924 return retval;
925
926 /* get r0, pc, r1 to r7 and cpsr */
927 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
928 return retval;
929
930 /* move r0 from buffer to register cache */
931 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
932 armv4_5->core_cache->reg_list[0].dirty = 1;
933 armv4_5->core_cache->reg_list[0].valid = 1;
934 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
935
936 /* move pc from buffer to register cache */
937 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
938 armv4_5->core_cache->reg_list[15].dirty = 1;
939 armv4_5->core_cache->reg_list[15].valid = 1;
940 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
941
942 /* move data from buffer to register cache */
943 for (i = 1; i <= 7; i++)
944 {
945 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
946 armv4_5->core_cache->reg_list[i].dirty = 1;
947 armv4_5->core_cache->reg_list[i].valid = 1;
948 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
949 }
950
951 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
952 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
953 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
954 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
955
956 armv4_5->core_mode = buffer[9] & 0x1f;
957 if (!is_arm_mode(armv4_5->core_mode))
958 {
959 target->state = TARGET_UNKNOWN;
960 LOG_ERROR("cpsr contains invalid mode value - communication failure");
961 return ERROR_TARGET_FAILURE;
962 }
963 LOG_DEBUG("target entered debug state in %s mode",
964 arm_mode_name(armv4_5->core_mode));
965
966 if (buffer[9] & 0x20)
967 armv4_5->core_state = ARMV4_5_STATE_THUMB;
968 else
969 armv4_5->core_state = ARMV4_5_STATE_ARM;
970
971
972 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
973 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
974 {
975 xscale_receive(target, buffer, 8);
976 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
977 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
978 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
979 }
980 else
981 {
982 /* r8 to r14, but no spsr */
983 xscale_receive(target, buffer, 7);
984 }
985
986 /* move data from buffer to register cache */
987 for (i = 8; i <= 14; i++)
988 {
989 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
990 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
991 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
992 }
993
994 /* examine debug reason */
995 xscale_read_dcsr(target);
996 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
997
998 /* stored PC (for calculating fixup) */
999 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1000
1001 switch (moe)
1002 {
1003 case 0x0: /* Processor reset */
1004 target->debug_reason = DBG_REASON_DBGRQ;
1005 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1006 pc -= 4;
1007 break;
1008 case 0x1: /* Instruction breakpoint hit */
1009 target->debug_reason = DBG_REASON_BREAKPOINT;
1010 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1011 pc -= 4;
1012 break;
1013 case 0x2: /* Data breakpoint hit */
1014 target->debug_reason = DBG_REASON_WATCHPOINT;
1015 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1016 pc -= 4;
1017 break;
1018 case 0x3: /* BKPT instruction executed */
1019 target->debug_reason = DBG_REASON_BREAKPOINT;
1020 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1021 pc -= 4;
1022 break;
1023 case 0x4: /* Ext. debug event */
1024 target->debug_reason = DBG_REASON_DBGRQ;
1025 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1026 pc -= 4;
1027 break;
1028 case 0x5: /* Vector trap occurred */
1029 target->debug_reason = DBG_REASON_BREAKPOINT;
1030 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1031 pc -= 4;
1032 break;
1033 case 0x6: /* Trace buffer full break */
1034 target->debug_reason = DBG_REASON_DBGRQ;
1035 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1036 pc -= 4;
1037 break;
1038 case 0x7: /* Reserved (may flag Hot-Debug support) */
1039 default:
1040 LOG_ERROR("Method of Entry is 'Reserved'");
1041 exit(-1);
1042 break;
1043 }
1044
1045 /* apply PC fixup */
1046 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1047
1048 /* on the first debug entry, identify cache type */
1049 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1050 {
1051 uint32_t cache_type_reg;
1052
1053 /* read cp15 cache type register */
1054 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1055 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1056
1057 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1058 }
1059
1060 /* examine MMU and Cache settings */
1061 /* read cp15 control register */
1062 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1063 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1064 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1065 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1066 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1067
1068 /* tracing enabled, read collected trace data */
1069 if (xscale->trace.buffer_enabled)
1070 {
1071 xscale_read_trace(target);
1072 xscale->trace.buffer_fill--;
1073
1074 /* resume if we're still collecting trace data */
1075 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1076 && (xscale->trace.buffer_fill > 0))
1077 {
1078 xscale_resume(target, 1, 0x0, 1, 0);
1079 }
1080 else
1081 {
1082 xscale->trace.buffer_enabled = 0;
1083 }
1084 }
1085
1086 return ERROR_OK;
1087 }
1088
1089 static int xscale_halt(struct target *target)
1090 {
1091 struct xscale_common *xscale = target_to_xscale(target);
1092
1093 LOG_DEBUG("target->state: %s",
1094 target_state_name(target));
1095
1096 if (target->state == TARGET_HALTED)
1097 {
1098 LOG_DEBUG("target was already halted");
1099 return ERROR_OK;
1100 }
1101 else if (target->state == TARGET_UNKNOWN)
1102 {
1103 /* this must not happen for an XScale target */
1104 LOG_ERROR("target was in unknown state when halt was requested");
1105 return ERROR_TARGET_INVALID;
1106 }
1107 else if (target->state == TARGET_RESET)
1108 {
1109 LOG_DEBUG("target->state == TARGET_RESET");
1110 }
1111 else
1112 {
1113 /* assert external dbg break */
1114 xscale->external_debug_break = 1;
1115 xscale_read_dcsr(target);
1116
1117 target->debug_reason = DBG_REASON_DBGRQ;
1118 }
1119
1120 return ERROR_OK;
1121 }
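/* Note that xscale_halt() only asserts the external debug break; the bit is
 * shifted out to DCSR as a side effect of xscale_read_dcsr(). The target is
 * not marked halted here: xscale_poll() later detects the debug handler's TX
 * activity and completes the halt via xscale_debug_entry().
 */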
1122
1123 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1124 {
1125 struct xscale_common *xscale = target_to_xscale(target);
1126 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1127 int retval;
1128
1129 if (xscale->ibcr0_used)
1130 {
1131 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1132
1133 if (ibcr0_bp)
1134 {
1135 xscale_unset_breakpoint(target, ibcr0_bp);
1136 }
1137 else
1138 {
1139 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1140 exit(-1);
1141 }
1142 }
1143
1144 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1145 return retval;
1146
1147 return ERROR_OK;
1148 }
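/* Single-stepping is implemented with a hardware breakpoint: IBCR0 is loaded
 * with the address of the next instruction, with bit 0 set as the enable bit
 * (the same bit that is masked off with 0xfffffffe when searching for an
 * existing breakpoint above).
 */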
1149
1150 static int xscale_disable_single_step(struct target *target)
1151 {
1152 struct xscale_common *xscale = target_to_xscale(target);
1153 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1154 int retval;
1155
1156 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1157 return retval;
1158
1159 return ERROR_OK;
1160 }
1161
1162 static void xscale_enable_watchpoints(struct target *target)
1163 {
1164 struct watchpoint *watchpoint = target->watchpoints;
1165
1166 while (watchpoint)
1167 {
1168 if (watchpoint->set == 0)
1169 xscale_set_watchpoint(target, watchpoint);
1170 watchpoint = watchpoint->next;
1171 }
1172 }
1173
1174 static void xscale_enable_breakpoints(struct target *target)
1175 {
1176 struct breakpoint *breakpoint = target->breakpoints;
1177
1178 /* set any pending breakpoints */
1179 while (breakpoint)
1180 {
1181 if (breakpoint->set == 0)
1182 xscale_set_breakpoint(target, breakpoint);
1183 breakpoint = breakpoint->next;
1184 }
1185 }
1186
1187 static int xscale_resume(struct target *target, int current,
1188 uint32_t address, int handle_breakpoints, int debug_execution)
1189 {
1190 struct xscale_common *xscale = target_to_xscale(target);
1191 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
1192 struct breakpoint *breakpoint = target->breakpoints;
1193 uint32_t current_pc;
1194 int retval;
1195 int i;
1196
1197 LOG_DEBUG("-");
1198
1199 if (target->state != TARGET_HALTED)
1200 {
1201 LOG_WARNING("target not halted");
1202 return ERROR_TARGET_NOT_HALTED;
1203 }
1204
1205 if (!debug_execution)
1206 {
1207 target_free_all_working_areas(target);
1208 }
1209
1210 /* update vector tables */
1211 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1212 return retval;
1213
1214 /* current = 1: continue on current pc, otherwise continue at <address> */
1215 if (!current)
1216 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1217
1218 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1219
1220 /* if we're at the reset vector, we have to simulate the branch */
1221 if (current_pc == 0x0)
1222 {
1223 arm_simulate_step(target, NULL);
1224 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1225 }
1226
1227 /* the front-end may request us not to handle breakpoints */
1228 if (handle_breakpoints)
1229 {
1230 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1231 {
1232 uint32_t next_pc;
1233
1234 /* there's a breakpoint at the current PC, we have to step over it */
1235 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1236 xscale_unset_breakpoint(target, breakpoint);
1237
1238 /* calculate PC of next instruction */
1239 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1240 {
1241 uint32_t current_opcode;
1242 target_read_u32(target, current_pc, &current_opcode);
1243 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1244 }
1245
1246 LOG_DEBUG("enable single-step");
1247 xscale_enable_single_step(target, next_pc);
1248
1249 /* restore banked registers */
1250 xscale_restore_context(target);
1251
1252 /* send resume request (command 0x30 or 0x31)
1253 * clean the trace buffer if it is to be enabled (0x62) */
1254 if (xscale->trace.buffer_enabled)
1255 {
1256 xscale_send_u32(target, 0x62);
1257 xscale_send_u32(target, 0x31);
1258 }
1259 else
1260 xscale_send_u32(target, 0x30);
1261
1262 /* send CPSR */
1263 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1264 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1265
1266 for (i = 7; i >= 0; i--)
1267 {
1268 /* send register */
1269 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1270 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1271 }
1272
1273 /* send PC */
1274 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1275 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1276
1277 /* wait for and process debug entry */
1278 xscale_debug_entry(target);
1279
1280 LOG_DEBUG("disable single-step");
1281 xscale_disable_single_step(target);
1282
1283 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1284 xscale_set_breakpoint(target, breakpoint);
1285 }
1286 }
1287
1288 /* enable any pending breakpoints and watchpoints */
1289 xscale_enable_breakpoints(target);
1290 xscale_enable_watchpoints(target);
1291
1292 /* restore banked registers */
1293 xscale_restore_context(target);
1294
1295 /* send resume request (command 0x30 or 0x31)
1296 * clean the trace buffer if it is to be enabled (0x62) */
1297 if (xscale->trace.buffer_enabled)
1298 {
1299 xscale_send_u32(target, 0x62);
1300 xscale_send_u32(target, 0x31);
1301 }
1302 else
1303 xscale_send_u32(target, 0x30);
1304
1305 /* send CPSR */
1306 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1307 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1308
1309 for (i = 7; i >= 0; i--)
1310 {
1311 /* send register */
1312 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1313 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1314 }
1315
1316 /* send PC */
1317 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1318 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1319
1320 target->debug_reason = DBG_REASON_NOTHALTED;
1321
1322 if (!debug_execution)
1323 {
1324 /* registers are now invalid */
1325 register_cache_invalidate(armv4_5->core_cache);
1326 target->state = TARGET_RUNNING;
1327 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1328 }
1329 else
1330 {
1331 target->state = TARGET_DEBUG_RUNNING;
1332 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1333 }
1334
1335 LOG_DEBUG("target resumed");
1336
1337 return ERROR_OK;
1338 }
1339
1340 static int xscale_step_inner(struct target *target, int current,
1341 uint32_t address, int handle_breakpoints)
1342 {
1343 struct xscale_common *xscale = target_to_xscale(target);
1344 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
1345 uint32_t next_pc;
1346 int retval;
1347 int i;
1348
1349 target->debug_reason = DBG_REASON_SINGLESTEP;
1350
1351 /* calculate PC of next instruction */
1352 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1353 {
1354 uint32_t current_opcode, current_pc;
1355 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1356
1357 target_read_u32(target, current_pc, &current_opcode);
1358 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1359 return retval;
1360 }
1361
1362 LOG_DEBUG("enable single-step");
1363 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1364 return retval;
1365
1366 /* restore banked registers */
1367 if ((retval = xscale_restore_context(target)) != ERROR_OK)
1368 return retval;
1369
1370 /* send resume request (command 0x30 or 0x31)
1371 * clean the trace buffer if it is to be enabled (0x62) */
1372 if (xscale->trace.buffer_enabled)
1373 {
1374 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1375 return retval;
1376 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1377 return retval;
1378 }
1379 else
1380 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1381 return retval;
1382
1383 /* send CPSR */
1384 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32))) != ERROR_OK)
1385 return retval;
1386 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1387
1388 for (i = 7; i >= 0; i--)
1389 {
1390 /* send register */
1391 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1392 return retval;
1393 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1394 }
1395
1396 /* send PC */
1397 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
1398 return retval;
1399 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1400
1401 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1402
1403 /* registers are now invalid */
1404 register_cache_invalidate(armv4_5->core_cache);
1405
1406 /* wait for and process debug entry */
1407 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1408 return retval;
1409
1410 LOG_DEBUG("disable single-step");
1411 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1412 return retval;
1413
1414 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1415
1416 return ERROR_OK;
1417 }
1418
1419 static int xscale_step(struct target *target, int current,
1420 uint32_t address, int handle_breakpoints)
1421 {
1422 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1423 struct breakpoint *breakpoint = target->breakpoints;
1424
1425 uint32_t current_pc;
1426 int retval;
1427
1428 if (target->state != TARGET_HALTED)
1429 {
1430 LOG_WARNING("target not halted");
1431 return ERROR_TARGET_NOT_HALTED;
1432 }
1433
1434 /* current = 1: continue on current pc, otherwise continue at <address> */
1435 if (!current)
1436 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1437
1438 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1439
1440 /* if we're at the reset vector, we have to simulate the step */
1441 if (current_pc == 0x0)
1442 {
1443 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1444 return retval;
1445 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1446
1447 target->debug_reason = DBG_REASON_SINGLESTEP;
1448 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1449
1450 return ERROR_OK;
1451 }
1452
1453 /* the front-end may request us not to handle breakpoints */
1454 if (handle_breakpoints)
1455 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1456 {
1457 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1458 return retval;
1459 }
1460
1461 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1462
1463 if (breakpoint)
1464 {
1465 xscale_set_breakpoint(target, breakpoint);
1466 }
1467
1468 LOG_DEBUG("target stepped");
1469
1470 return ERROR_OK;
1471
1472 }
1473
1474 static int xscale_assert_reset(struct target *target)
1475 {
1476 struct xscale_common *xscale = target_to_xscale(target);
1477
1478 LOG_DEBUG("target->state: %s",
1479 target_state_name(target));
1480
1481 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1482 * end up in T-L-R, which would reset JTAG)
1483 */
1484 jtag_set_end_state(TAP_IDLE);
1485 xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
1486
1487 /* set Hold reset, Halt mode and Trap Reset */
1488 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1489 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1490 xscale_write_dcsr(target, 1, 0);
1491
1492 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1493 xscale_jtag_set_instr(target->tap, 0x7f);
1494 jtag_execute_queue();
1495
1496 /* assert reset */
1497 jtag_add_reset(0, 1);
1498
1499 /* sleep 1ms, to be sure we fulfill any requirements */
1500 jtag_add_sleep(1000);
1501 jtag_execute_queue();
1502
1503 target->state = TARGET_RESET;
1504
1505 if (target->reset_halt)
1506 {
1507 int retval;
1508 if ((retval = target_halt(target)) != ERROR_OK)
1509 return retval;
1510 }
1511
1512 return ERROR_OK;
1513 }
1514
1515 static int xscale_deassert_reset(struct target *target)
1516 {
1517 struct xscale_common *xscale = target_to_xscale(target);
1518 struct breakpoint *breakpoint = target->breakpoints;
1519
1520 LOG_DEBUG("-");
1521
1522 xscale->ibcr_available = 2;
1523 xscale->ibcr0_used = 0;
1524 xscale->ibcr1_used = 0;
1525
1526 xscale->dbr_available = 2;
1527 xscale->dbr0_used = 0;
1528 xscale->dbr1_used = 0;
1529
1530 /* mark all hardware breakpoints as unset */
1531 while (breakpoint)
1532 {
1533 if (breakpoint->type == BKPT_HARD)
1534 {
1535 breakpoint->set = 0;
1536 }
1537 breakpoint = breakpoint->next;
1538 }
1539
1540 register_cache_invalidate(xscale->armv4_5_common.core_cache);
1541
1542 /* FIXME mark that hardware watchpoints got unset too. Also,
1543 * at least some of the XScale registers are invalid...
1544 */
1545
1546 /*
1547 * REVISIT: *assumes* we had an SRST+TRST reset so the mini-icache
1548 * contents got invalidated. Safer to force that, so writing new
1549 * contents can't ever fail.
1550 */
1551 {
1552 uint32_t address;
1553 unsigned buf_cnt;
1554 const uint8_t *buffer = xscale_debug_handler;
1555 int retval;
1556
1557 /* release SRST */
1558 jtag_add_reset(0, 0);
1559
1560 /* wait 300ms; 150 and 100ms were not enough */
1561 jtag_add_sleep(300*1000);
1562
1563 jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
1564 jtag_execute_queue();
1565
1566 /* set Hold reset, Halt mode and Trap Reset */
1567 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1568 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1569 xscale_write_dcsr(target, 1, 0);
1570
1571 /* Load the debug handler into the mini-icache. Since
1572 * it's using halt mode (not monitor mode), it runs in
1573 * "Special Debug State" for access to registers, memory,
1574 * coprocessors, trace data, etc.
1575 */
1576 address = xscale->handler_address;
1577 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1578 binary_size > 0;
1579 binary_size -= buf_cnt, buffer += buf_cnt)
1580 {
1581 uint32_t cache_line[8];
1582 unsigned i;
1583
1584 buf_cnt = binary_size;
1585 if (buf_cnt > 32)
1586 buf_cnt = 32;
1587
1588 for (i = 0; i < buf_cnt; i += 4)
1589 {
1590 /* convert LE buffer to host-endian uint32_t */
1591 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1592 }
1593
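/* pad the remainder of the 32-byte cache line; 0xe1a08008 is
 * ARM "mov r8, r8", effectively a no-op */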
1594 for (; i < 32; i += 4)
1595 {
1596 cache_line[i / 4] = 0xe1a08008;
1597 }
1598
1599 /* only load addresses other than the reset vectors */
1600 if ((address % 0x400) != 0x0)
1601 {
1602 retval = xscale_load_ic(target, address,
1603 cache_line);
1604 if (retval != ERROR_OK)
1605 return retval;
1606 }
1607
1608 address += buf_cnt;
1609 }
1610
1611 retval = xscale_load_ic(target, 0x0,
1612 xscale->low_vectors);
1613 if (retval != ERROR_OK)
1614 return retval;
1615 retval = xscale_load_ic(target, 0xffff0000,
1616 xscale->high_vectors);
1617 if (retval != ERROR_OK)
1618 return retval;
1619
1620 jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
1621
1622 jtag_add_sleep(100000);
1623
1624 /* set Hold reset, Halt mode and Trap Reset */
1625 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1626 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1627 xscale_write_dcsr(target, 1, 0);
1628
1629 /* clear Hold reset to let the target run (should enter debug handler) */
1630 xscale_write_dcsr(target, 0, 1);
1631 target->state = TARGET_RUNNING;
1632
1633 if (!target->reset_halt)
1634 {
1635 jtag_add_sleep(10000);
1636
1637 /* we should have entered debug now */
1638 xscale_debug_entry(target);
1639 target->state = TARGET_HALTED;
1640
1641 /* resume the target */
1642 xscale_resume(target, 1, 0x0, 1, 0);
1643 }
1644 }
1645
1646 return ERROR_OK;
1647 }
1648
1649 static int xscale_read_core_reg(struct target *target, int num,
1650 enum armv4_5_mode mode)
1651 {
1652 LOG_ERROR("not implemented");
1653 return ERROR_OK;
1654 }
1655
1656 static int xscale_write_core_reg(struct target *target, int num,
1657 enum armv4_5_mode mode, uint32_t value)
1658 {
1659 LOG_ERROR("not implemented");
1660 return ERROR_OK;
1661 }
1662
1663 static int xscale_full_context(struct target *target)
1664 {
1665 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1666
1667 uint32_t *buffer;
1668
1669 int i, j;
1670
1671 LOG_DEBUG("-");
1672
1673 if (target->state != TARGET_HALTED)
1674 {
1675 LOG_WARNING("target not halted");
1676 return ERROR_TARGET_NOT_HALTED;
1677 }
1678
1679 buffer = malloc(4 * 8);
1680
1681 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1682 * we can't enter User mode on an XScale (unpredictable),
1683 * but User shares registers with SYS
1684 */
1685 for (i = 1; i < 7; i++)
1686 {
1687 int valid = 1;
1688
1689 /* check if there are invalid registers in the current mode
1690 */
1691 for (j = 0; j <= 16; j++)
1692 {
1693 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1694 valid = 0;
1695 }
1696
1697 if (!valid)
1698 {
1699 uint32_t tmp_cpsr;
1700
1701 /* request banked registers */
1702 xscale_send_u32(target, 0x0);
1703
1704 tmp_cpsr = 0x0;
1705 tmp_cpsr |= armv4_5_number_to_mode(i);
1706 tmp_cpsr |= 0xc0; /* I/F bits */
1707
1708 /* send CPSR for desired mode */
1709 xscale_send_u32(target, tmp_cpsr);
1710
1711 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1712 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1713 {
1714 xscale_receive(target, buffer, 8);
1715 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1716 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1717 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1718 }
1719 else
1720 {
1721 xscale_receive(target, buffer, 7);
1722 }
1723
1724 /* move data from buffer to register cache */
1725 for (j = 8; j <= 14; j++)
1726 {
1727 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1728 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1729 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1730 }
1731 }
1732 }
1733
1734 free(buffer);
1735
1736 return ERROR_OK;
1737 }
1738
1739 static int xscale_restore_context(struct target *target)
1740 {
1741 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1742
1743 int i, j;
1744
1745 if (target->state != TARGET_HALTED)
1746 {
1747 LOG_WARNING("target not halted");
1748 return ERROR_TARGET_NOT_HALTED;
1749 }
1750
1751 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1752 * we can't enter User mode on an XScale (unpredictable),
1753 * but User shares registers with SYS
1754 */
1755 for (i = 1; i < 7; i++)
1756 {
1757 int dirty = 0;
1758
1759 /* check if there are dirty registers in the current mode
1760 */
1761 for (j = 8; j <= 14; j++)
1762 {
1763 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1764 dirty = 1;
1765 }
1766
1767 /* if not USR/SYS, check if the SPSR needs to be written */
1768 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1769 {
1770 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1771 dirty = 1;
1772 }
1773
1774 if (dirty)
1775 {
1776 uint32_t tmp_cpsr;
1777
1778 /* send banked registers */
1779 xscale_send_u32(target, 0x1);
1780
1781 tmp_cpsr = 0x0;
1782 tmp_cpsr |= armv4_5_number_to_mode(i);
1783 tmp_cpsr |= 0xc0; /* I/F bits */
1784
1785 /* send CPSR for desired mode */
1786 xscale_send_u32(target, tmp_cpsr);
1787
1788 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1789 for (j = 8; j <= 14; j++)
1790 {
1791 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, j).value, 0, 32));
1792 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1793 }
1794
1795 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1796 {
1797 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32));
1798 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1799 }
1800 }
1801 }
1802
1803 return ERROR_OK;
1804 }
1805
1806 static int xscale_read_memory(struct target *target, uint32_t address,
1807 uint32_t size, uint32_t count, uint8_t *buffer)
1808 {
1809 struct xscale_common *xscale = target_to_xscale(target);
1810 uint32_t *buf32;
1811 uint32_t i;
1812 int retval;
1813
1814 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1815
1816 if (target->state != TARGET_HALTED)
1817 {
1818 LOG_WARNING("target not halted");
1819 return ERROR_TARGET_NOT_HALTED;
1820 }
1821
1822 /* sanitize arguments */
1823 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1824 return ERROR_INVALID_ARGUMENTS;
1825
1826 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1827 return ERROR_TARGET_UNALIGNED_ACCESS;
1828
1829 /* send memory read request (command 0x1n, n: access size) */
1830 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1831 return retval;
1832
1833 /* send base address for read request */
1834 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1835 return retval;
1836
1837 /* send number of requested data words */
1838 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1839 return retval;
1840
1841 /* receive data from target (count 32-bit words, in host endianness) */
1842 buf32 = malloc(4 * count);
1843 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1844 return retval;
1845
1846 /* extract data from host-endian buffer into byte stream */
1847 for (i = 0; i < count; i++)
1848 {
1849 switch (size)
1850 {
1851 case 4:
1852 target_buffer_set_u32(target, buffer, buf32[i]);
1853 buffer += 4;
1854 break;
1855 case 2:
1856 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1857 buffer += 2;
1858 break;
1859 case 1:
1860 *buffer++ = buf32[i] & 0xff;
1861 break;
1862 default:
1863 LOG_ERROR("invalid read size");
1864 return ERROR_INVALID_ARGUMENTS;
1865 }
1866 }
1867
1868 free(buf32);
1869
1870 /* examine DCSR, to see if Sticky Abort (SA) got set */
1871 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1872 return retval;
1873 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1874 {
1875 /* clear SA bit */
1876 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1877 return retval;
1878
1879 return ERROR_TARGET_DATA_ABORT;
1880 }
1881
1882 return ERROR_OK;
1883 }
1884
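/* Write path counterpart (again, a summary of the code below rather than
 * of a separate protocol document): command 0x20|size, base address and
 * word count are sent first, then xscale_send() streams the raw buffer to
 * the debug handler.  As with reads, DCSR.SA is checked afterwards to
 * detect a data abort on the target side.
 */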
1885 static int xscale_write_memory(struct target *target, uint32_t address,
1886 uint32_t size, uint32_t count, uint8_t *buffer)
1887 {
1888 struct xscale_common *xscale = target_to_xscale(target);
1889 int retval;
1890
1891 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1892
1893 if (target->state != TARGET_HALTED)
1894 {
1895 LOG_WARNING("target not halted");
1896 return ERROR_TARGET_NOT_HALTED;
1897 }
1898
1899 /* sanitize arguments */
1900 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1901 return ERROR_INVALID_ARGUMENTS;
1902
1903 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1904 return ERROR_TARGET_UNALIGNED_ACCESS;
1905
1906 /* send memory write request (command 0x2n, n: access size) */
1907 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1908 return retval;
1909
1910	/* send base address for write request */
1911 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1912 return retval;
1913
1914	/* send number of requested data words to be written */
1915 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1916 return retval;
1917
1918	/* disabled word-by-word send path; xscale_send() below streams the whole buffer instead */
1919 #if 0
1920 for (i = 0; i < count; i++)
1921 {
1922 switch (size)
1923 {
1924 case 4:
1925 value = target_buffer_get_u32(target, buffer);
1926 xscale_send_u32(target, value);
1927 buffer += 4;
1928 break;
1929 case 2:
1930 value = target_buffer_get_u16(target, buffer);
1931 xscale_send_u32(target, value);
1932 buffer += 2;
1933 break;
1934 case 1:
1935 value = *buffer;
1936 xscale_send_u32(target, value);
1937 buffer += 1;
1938 break;
1939 default:
1940 LOG_ERROR("should never get here");
1941 exit(-1);
1942 }
1943 }
1944 #endif
1945 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1946 return retval;
1947
1948 /* examine DCSR, to see if Sticky Abort (SA) got set */
1949 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1950 return retval;
1951 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1952 {
1953 /* clear SA bit */
1954 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1955 return retval;
1956
1957 return ERROR_TARGET_DATA_ABORT;
1958 }
1959
1960 return ERROR_OK;
1961 }
1962
1963 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
1964 uint32_t count, uint8_t *buffer)
1965 {
1966 return xscale_write_memory(target, address, 4, count, buffer);
1967 }
1968
1969 static uint32_t xscale_get_ttb(struct target *target)
1970 {
1971 struct xscale_common *xscale = target_to_xscale(target);
1972 uint32_t ttb;
1973
1974 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1975 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1976
1977 return ttb;
1978 }
1979
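/* MMU/cache control, as used by the two helpers below: bit 0 of the cp15
 * control register enables the MMU, bit 2 the data/unified cache and
 * bit 12 the instruction cache.  Before the data cache is disabled it is
 * cleaned (handler command 0x50, using the reserved cache_clean_address
 * region) and invalidated (0x51); 0x52 invalidates the ICache and 0x53
 * issues a CPWAIT so outstanding cp15 operations complete.
 */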
1980 static void xscale_disable_mmu_caches(struct target *target, int mmu,
1981 int d_u_cache, int i_cache)
1982 {
1983 struct xscale_common *xscale = target_to_xscale(target);
1984 uint32_t cp15_control;
1985
1986 /* read cp15 control register */
1987 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1988 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1989
1990 if (mmu)
1991 cp15_control &= ~0x1U;
1992
1993 if (d_u_cache)
1994 {
1995 /* clean DCache */
1996 xscale_send_u32(target, 0x50);
1997 xscale_send_u32(target, xscale->cache_clean_address);
1998
1999 /* invalidate DCache */
2000 xscale_send_u32(target, 0x51);
2001
2002 cp15_control &= ~0x4U;
2003 }
2004
2005 if (i_cache)
2006 {
2007 /* invalidate ICache */
2008 xscale_send_u32(target, 0x52);
2009 cp15_control &= ~0x1000U;
2010 }
2011
2012 /* write new cp15 control register */
2013 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2014
2015 /* execute cpwait to ensure outstanding operations complete */
2016 xscale_send_u32(target, 0x53);
2017 }
2018
2019 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2020 int d_u_cache, int i_cache)
2021 {
2022 struct xscale_common *xscale = target_to_xscale(target);
2023 uint32_t cp15_control;
2024
2025 /* read cp15 control register */
2026 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2027 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2028
2029 if (mmu)
2030 cp15_control |= 0x1U;
2031
2032 if (d_u_cache)
2033 cp15_control |= 0x4U;
2034
2035 if (i_cache)
2036 cp15_control |= 0x1000U;
2037
2038 /* write new cp15 control register */
2039 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2040
2041 /* execute cpwait to ensure outstanding operations complete */
2042 xscale_send_u32(target, 0x53);
2043 }
2044
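/* Breakpoint handling below: hardware breakpoints use the two IBCR
 * comparators (writing address | 1 arms a comparator), while software
 * breakpoints save the original opcode and overwrite it with the ARM or
 * Thumb BKPT encoding.  breakpoint->set records which resource was used
 * (1 = IBCR0, 2 = IBCR1 for hardware; 1 for software).
 */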
2045 static int xscale_set_breakpoint(struct target *target,
2046 struct breakpoint *breakpoint)
2047 {
2048 int retval;
2049 struct xscale_common *xscale = target_to_xscale(target);
2050
2051 if (target->state != TARGET_HALTED)
2052 {
2053 LOG_WARNING("target not halted");
2054 return ERROR_TARGET_NOT_HALTED;
2055 }
2056
2057 if (breakpoint->set)
2058 {
2059 LOG_WARNING("breakpoint already set");
2060 return ERROR_OK;
2061 }
2062
2063 if (breakpoint->type == BKPT_HARD)
2064 {
2065 uint32_t value = breakpoint->address | 1;
2066 if (!xscale->ibcr0_used)
2067 {
2068 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2069 xscale->ibcr0_used = 1;
2070 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2071 }
2072 else if (!xscale->ibcr1_used)
2073 {
2074 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2075 xscale->ibcr1_used = 1;
2076 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2077 }
2078 else
2079 {
2080 LOG_ERROR("BUG: no hardware comparator available");
2081 return ERROR_OK;
2082 }
2083 }
2084 else if (breakpoint->type == BKPT_SOFT)
2085 {
2086 if (breakpoint->length == 4)
2087 {
2088 /* keep the original instruction in target endianness */
2089 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2090 {
2091 return retval;
2092 }
2093			/* write the bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
2094 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2095 {
2096 return retval;
2097 }
2098 }
2099 else
2100 {
2101 /* keep the original instruction in target endianness */
2102 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2103 {
2104 return retval;
2105 }
2106			/* write the Thumb bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
2107 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2108 {
2109 return retval;
2110 }
2111 }
2112 breakpoint->set = 1;
2113 }
2114
2115 return ERROR_OK;
2116 }
2117
2118 static int xscale_add_breakpoint(struct target *target,
2119 struct breakpoint *breakpoint)
2120 {
2121 struct xscale_common *xscale = target_to_xscale(target);
2122
2123 if (target->state != TARGET_HALTED)
2124 {
2125 LOG_WARNING("target not halted");
2126 return ERROR_TARGET_NOT_HALTED;
2127 }
2128
2129 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2130 {
2131 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2132 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2133 }
2134
2135 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2136 {
2137 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2138 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2139 }
2140
2141 if (breakpoint->type == BKPT_HARD)
2142 {
2143 xscale->ibcr_available--;
2144 }
2145
2146 return ERROR_OK;
2147 }
2148
2149 static int xscale_unset_breakpoint(struct target *target,
2150 struct breakpoint *breakpoint)
2151 {
2152 int retval;
2153 struct xscale_common *xscale = target_to_xscale(target);
2154
2155 if (target->state != TARGET_HALTED)
2156 {
2157 LOG_WARNING("target not halted");
2158 return ERROR_TARGET_NOT_HALTED;
2159 }
2160
2161 if (!breakpoint->set)
2162 {
2163 LOG_WARNING("breakpoint not set");
2164 return ERROR_OK;
2165 }
2166
2167 if (breakpoint->type == BKPT_HARD)
2168 {
2169 if (breakpoint->set == 1)
2170 {
2171 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2172 xscale->ibcr0_used = 0;
2173 }
2174 else if (breakpoint->set == 2)
2175 {
2176 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2177 xscale->ibcr1_used = 0;
2178 }
2179 breakpoint->set = 0;
2180 }
2181 else
2182 {
2183 /* restore original instruction (kept in target endianness) */
2184 if (breakpoint->length == 4)
2185 {
2186 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2187 {
2188 return retval;
2189 }
2190 }
2191 else
2192 {
2193 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2194 {
2195 return retval;
2196 }
2197 }
2198 breakpoint->set = 0;
2199 }
2200
2201 return ERROR_OK;
2202 }
2203
2204 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2205 {
2206 struct xscale_common *xscale = target_to_xscale(target);
2207
2208 if (target->state != TARGET_HALTED)
2209 {
2210 LOG_WARNING("target not halted");
2211 return ERROR_TARGET_NOT_HALTED;
2212 }
2213
2214 if (breakpoint->set)
2215 {
2216 xscale_unset_breakpoint(target, breakpoint);
2217 }
2218
2219 if (breakpoint->type == BKPT_HARD)
2220 xscale->ibcr_available++;
2221
2222 return ERROR_OK;
2223 }
2224
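/* Watchpoints use DBR0/DBR1 for the address and DBCON for the enable
 * bits: bits [1:0] control DBR0 and bits [3:2] control DBR1, with the
 * enable value chosen from watchpoint->rw as in the switch below
 * (0x1 write, 0x2 access, 0x3 read).  watchpoint->set records which DBR
 * was claimed so it can be released again.
 */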
2225 static int xscale_set_watchpoint(struct target *target,
2226 struct watchpoint *watchpoint)
2227 {
2228 struct xscale_common *xscale = target_to_xscale(target);
2229 uint8_t enable = 0;
2230 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2231 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2232
2233 if (target->state != TARGET_HALTED)
2234 {
2235 LOG_WARNING("target not halted");
2236 return ERROR_TARGET_NOT_HALTED;
2237 }
2238
2239 xscale_get_reg(dbcon);
2240
2241 switch (watchpoint->rw)
2242 {
2243 case WPT_READ:
2244 enable = 0x3;
2245 break;
2246 case WPT_ACCESS:
2247 enable = 0x2;
2248 break;
2249 case WPT_WRITE:
2250 enable = 0x1;
2251 break;
2252 default:
2253 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2254 }
2255
2256 if (!xscale->dbr0_used)
2257 {
2258 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2259 dbcon_value |= enable;
2260 xscale_set_reg_u32(dbcon, dbcon_value);
2261 watchpoint->set = 1;
2262 xscale->dbr0_used = 1;
2263 }
2264 else if (!xscale->dbr1_used)
2265 {
2266 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2267 dbcon_value |= enable << 2;
2268 xscale_set_reg_u32(dbcon, dbcon_value);
2269 watchpoint->set = 2;
2270 xscale->dbr1_used = 1;
2271 }
2272 else
2273 {
2274 LOG_ERROR("BUG: no hardware comparator available");
2275 return ERROR_OK;
2276 }
2277
2278 return ERROR_OK;
2279 }
2280
2281 static int xscale_add_watchpoint(struct target *target,
2282 struct watchpoint *watchpoint)
2283 {
2284 struct xscale_common *xscale = target_to_xscale(target);
2285
2286 if (target->state != TARGET_HALTED)
2287 {
2288 LOG_WARNING("target not halted");
2289 return ERROR_TARGET_NOT_HALTED;
2290 }
2291
2292 if (xscale->dbr_available < 1)
2293 {
2294 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2295 }
2296
2297 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2298 {
2299 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2300 }
2301
2302 xscale->dbr_available--;
2303
2304 return ERROR_OK;
2305 }
2306
2307 static int xscale_unset_watchpoint(struct target *target,
2308 struct watchpoint *watchpoint)
2309 {
2310 struct xscale_common *xscale = target_to_xscale(target);
2311 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2312 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2313
2314 if (target->state != TARGET_HALTED)
2315 {
2316 LOG_WARNING("target not halted");
2317 return ERROR_TARGET_NOT_HALTED;
2318 }
2319
2320 if (!watchpoint->set)
2321 {
2322		LOG_WARNING("watchpoint not set");
2323 return ERROR_OK;
2324 }
2325
2326 if (watchpoint->set == 1)
2327 {
2328 dbcon_value &= ~0x3;
2329 xscale_set_reg_u32(dbcon, dbcon_value);
2330 xscale->dbr0_used = 0;
2331 }
2332 else if (watchpoint->set == 2)
2333 {
2334 dbcon_value &= ~0xc;
2335 xscale_set_reg_u32(dbcon, dbcon_value);
2336 xscale->dbr1_used = 0;
2337 }
2338 watchpoint->set = 0;
2339
2340 return ERROR_OK;
2341 }
2342
2343 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2344 {
2345 struct xscale_common *xscale = target_to_xscale(target);
2346
2347 if (target->state != TARGET_HALTED)
2348 {
2349 LOG_WARNING("target not halted");
2350 return ERROR_TARGET_NOT_HALTED;
2351 }
2352
2353 if (watchpoint->set)
2354 {
2355 xscale_unset_watchpoint(target, watchpoint);
2356 }
2357
2358 xscale->dbr_available++;
2359
2360 return ERROR_OK;
2361 }
2362
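/* Register access paths: DCSR, TX and RX are reachable directly over
 * JTAG; RX and TXRXCTRL cannot be read from the host side, and TX and
 * TXRXCTRL cannot be written.  All other debug registers are proxied
 * through the debug handler with command 0x40 (read) or 0x41 (write),
 * followed by the handler's register number and, for writes, the value.
 */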
2363 static int xscale_get_reg(struct reg *reg)
2364 {
2365 struct xscale_reg *arch_info = reg->arch_info;
2366 struct target *target = arch_info->target;
2367 struct xscale_common *xscale = target_to_xscale(target);
2368
2369 /* DCSR, TX and RX are accessible via JTAG */
2370 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2371 {
2372 return xscale_read_dcsr(arch_info->target);
2373 }
2374 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2375 {
2376 /* 1 = consume register content */
2377 return xscale_read_tx(arch_info->target, 1);
2378 }
2379 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2380 {
2381 /* can't read from RX register (host -> debug handler) */
2382 return ERROR_OK;
2383 }
2384 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2385 {
2386 /* can't (explicitly) read from TXRXCTRL register */
2387 return ERROR_OK;
2388 }
2389	else /* Other DBG registers have to be transferred by the debug handler */
2390 {
2391 /* send CP read request (command 0x40) */
2392 xscale_send_u32(target, 0x40);
2393
2394 /* send CP register number */
2395 xscale_send_u32(target, arch_info->dbg_handler_number);
2396
2397 /* read register value */
2398 xscale_read_tx(target, 1);
2399 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2400
2401 reg->dirty = 0;
2402 reg->valid = 1;
2403 }
2404
2405 return ERROR_OK;
2406 }
2407
2408 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2409 {
2410 struct xscale_reg *arch_info = reg->arch_info;
2411 struct target *target = arch_info->target;
2412 struct xscale_common *xscale = target_to_xscale(target);
2413 uint32_t value = buf_get_u32(buf, 0, 32);
2414
2415 /* DCSR, TX and RX are accessible via JTAG */
2416 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2417 {
2418 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2419 return xscale_write_dcsr(arch_info->target, -1, -1);
2420 }
2421 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2422 {
2423 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2424 return xscale_write_rx(arch_info->target);
2425 }
2426 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2427 {
2428 /* can't write to TX register (debug-handler -> host) */
2429 return ERROR_OK;
2430 }
2431 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2432 {
2433 /* can't (explicitly) write to TXRXCTRL register */
2434 return ERROR_OK;
2435 }
2436	else /* Other DBG registers have to be transferred by the debug handler */
2437 {
2438 /* send CP write request (command 0x41) */
2439 xscale_send_u32(target, 0x41);
2440
2441 /* send CP register number */
2442 xscale_send_u32(target, arch_info->dbg_handler_number);
2443
2444 /* send CP register value */
2445 xscale_send_u32(target, value);
2446 buf_set_u32(reg->value, 0, 32, value);
2447 }
2448
2449 return ERROR_OK;
2450 }
2451
2452 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2453 {
2454 struct xscale_common *xscale = target_to_xscale(target);
2455 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2456 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2457
2458 /* send CP write request (command 0x41) */
2459 xscale_send_u32(target, 0x41);
2460
2461 /* send CP register number */
2462 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2463
2464 /* send CP register value */
2465 xscale_send_u32(target, value);
2466 buf_set_u32(dcsr->value, 0, 32, value);
2467
2468 return ERROR_OK;
2469 }
2470
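/* Trace buffer download: handler command 0x61 returns 258 words, i.e.
 * the 256 trace buffer entries followed by the two checkpoint registers.
 * The buffer is scanned backwards so that the four bytes preceding an
 * indirect-branch message (high nibble 0x9 or 0xd) can be marked as
 * address bytes rather than trace messages.
 */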
2471 static int xscale_read_trace(struct target *target)
2472 {
2473 struct xscale_common *xscale = target_to_xscale(target);
2474 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2475 struct xscale_trace_data **trace_data_p;
2476
2477 /* 258 words from debug handler
2478 * 256 trace buffer entries
2479 * 2 checkpoint addresses
2480 */
2481 uint32_t trace_buffer[258];
2482 int is_address[256];
2483 int i, j;
2484
2485 if (target->state != TARGET_HALTED)
2486 {
2487 LOG_WARNING("target must be stopped to read trace data");
2488 return ERROR_TARGET_NOT_HALTED;
2489 }
2490
2491 /* send read trace buffer command (command 0x61) */
2492 xscale_send_u32(target, 0x61);
2493
2494 /* receive trace buffer content */
2495 xscale_receive(target, trace_buffer, 258);
2496
2497 /* parse buffer backwards to identify address entries */
2498 for (i = 255; i >= 0; i--)
2499 {
2500 is_address[i] = 0;
2501 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2502 ((trace_buffer[i] & 0xf0) == 0xd0))
2503 {
2504			if (i > 0)	/* never index below the start of the buffer */
2505				is_address[--i] = 1;
2506			if (i > 0)
2507				is_address[--i] = 1;
2508			if (i > 0)
2509				is_address[--i] = 1;
2510			if (i > 0)
2511				is_address[--i] = 1;
2512 }
2513 }
2514
2515
2516 /* search first non-zero entry */
2517 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2518 ;
2519
2520 if (j == 256)
2521 {
2522 LOG_DEBUG("no trace data collected");
2523 return ERROR_XSCALE_NO_TRACE_DATA;
2524 }
2525
2526 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2527 ;
2528
2529 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2530 (*trace_data_p)->next = NULL;
2531 (*trace_data_p)->chkpt0 = trace_buffer[256];
2532 (*trace_data_p)->chkpt1 = trace_buffer[257];
2533 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2534 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2535 (*trace_data_p)->depth = 256 - j;
2536
2537 for (i = j; i < 256; i++)
2538 {
2539 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2540 if (is_address[i])
2541 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2542 else
2543 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2544 }
2545
2546 return ERROR_OK;
2547 }
2548
2549 static int xscale_read_instruction(struct target *target,
2550 struct arm_instruction *instruction)
2551 {
2552 struct xscale_common *xscale = target_to_xscale(target);
2553 int i;
2554 int section = -1;
2555 size_t size_read;
2556 uint32_t opcode;
2557 int retval;
2558
2559 if (!xscale->trace.image)
2560 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2561
2562 /* search for the section the current instruction belongs to */
2563 for (i = 0; i < xscale->trace.image->num_sections; i++)
2564 {
2565 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2566 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2567 {
2568 section = i;
2569 break;
2570 }
2571 }
2572
2573 if (section == -1)
2574 {
2575 /* current instruction couldn't be found in the image */
2576 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2577 }
2578
2579 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2580 {
2581 uint8_t buf[4];
2582 if ((retval = image_read_section(xscale->trace.image, section,
2583 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2584 4, buf, &size_read)) != ERROR_OK)
2585 {
2586 LOG_ERROR("error while reading instruction: %i", retval);
2587 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2588 }
2589 opcode = target_buffer_get_u32(target, buf);
2590 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2591 }
2592 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2593 {
2594 uint8_t buf[2];
2595 if ((retval = image_read_section(xscale->trace.image, section,
2596 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2597 2, buf, &size_read)) != ERROR_OK)
2598 {
2599 LOG_ERROR("error while reading instruction: %i", retval);
2600 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2601 }
2602 opcode = target_buffer_get_u16(target, buf);
2603 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2604 }
2605 else
2606 {
2607 LOG_ERROR("BUG: unknown core state encountered");
2608 exit(-1);
2609 }
2610
2611 return ERROR_OK;
2612 }
2613
2614 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2615 int i, uint32_t *target)
2616 {
2617 /* if there are less than four entries prior to the indirect branch message
2618 * we can't extract the address */
2619 if (i < 4)
2620 {
2621 return -1;
2622 }
2623
2624 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2625 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2626
2627 return 0;
2628 }
2629
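/* Trace message decoding used below (high nibble of each entry):
 * 0-7 exception, 8 direct branch, 9 indirect branch, 12/13 checkpointed
 * direct/indirect branch, 15 roll-over of the 4-bit instruction count.
 * The low nibble counts instructions executed since the previous message.
 */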
2630 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2631 {
2632 struct xscale_common *xscale = target_to_xscale(target);
2633 int next_pc_ok = 0;
2634 uint32_t next_pc = 0x0;
2635 struct xscale_trace_data *trace_data = xscale->trace.data;
2636 int retval;
2637
2638 while (trace_data)
2639 {
2640 int i, chkpt;
2641 int rollover;
2642 int branch;
2643 int exception;
2644 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2645
2646 chkpt = 0;
2647 rollover = 0;
2648
2649 for (i = 0; i < trace_data->depth; i++)
2650 {
2651 next_pc_ok = 0;
2652 branch = 0;
2653 exception = 0;
2654
2655 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2656 continue;
2657
2658 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2659 {
2660 case 0: /* Exceptions */
2661 case 1:
2662 case 2:
2663 case 3:
2664 case 4:
2665 case 5:
2666 case 6:
2667 case 7:
2668 exception = (trace_data->entries[i].data & 0x70) >> 4;
2669 next_pc_ok = 1;
2670 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2671 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2672 break;
2673 case 8: /* Direct Branch */
2674 branch = 1;
2675 break;
2676 case 9: /* Indirect Branch */
2677 branch = 1;
2678 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2679 {
2680 next_pc_ok = 1;
2681 }
2682 break;
2683 case 13: /* Checkpointed Indirect Branch */
2684 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2685 {
2686 next_pc_ok = 1;
2687 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2688 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2689 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2690 }
2691 /* explicit fall-through */
2692 case 12: /* Checkpointed Direct Branch */
2693 branch = 1;
2694 if (chkpt == 0)
2695 {
2696 next_pc_ok = 1;
2697 next_pc = trace_data->chkpt0;
2698 chkpt++;
2699 }
2700 else if (chkpt == 1)
2701 {
2702 next_pc_ok = 1;
2703						next_pc = trace_data->chkpt1;
2704 chkpt++;
2705 }
2706 else
2707 {
2708 LOG_WARNING("more than two checkpointed branches encountered");
2709 }
2710 break;
2711 case 15: /* Roll-over */
2712 rollover++;
2713 continue;
2714 default: /* Reserved */
2715 command_print(cmd_ctx, "--- reserved trace message ---");
2716 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2717 return ERROR_OK;
2718 }
2719
2720 if (xscale->trace.pc_ok)
2721 {
2722 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2723 struct arm_instruction instruction;
2724
2725 if ((exception == 6) || (exception == 7))
2726 {
2727 /* IRQ or FIQ exception, no instruction executed */
2728 executed -= 1;
2729 }
2730
2731 while (executed-- >= 0)
2732 {
2733 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2734 {
2735 /* can't continue tracing with no image available */
2736 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2737 {
2738 return retval;
2739 }
2740 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2741 {
2742 /* TODO: handle incomplete images */
2743 }
2744 }
2745
2746 /* a precise abort on a load to the PC is included in the incremental
2747 * word count, other instructions causing data aborts are not included
2748 */
2749 if ((executed == 0) && (exception == 4)
2750 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2751 {
2752 if ((instruction.type == ARM_LDM)
2753 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2754 {
2755 executed--;
2756 }
2757 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2758 && (instruction.info.load_store.Rd != 15))
2759 {
2760 executed--;
2761 }
2762 }
2763
2764 /* only the last instruction executed
2765 * (the one that caused the control flow change)
2766 * could be a taken branch
2767 */
2768 if (((executed == -1) && (branch == 1)) &&
2769 (((instruction.type == ARM_B) ||
2770 (instruction.type == ARM_BL) ||
2771 (instruction.type == ARM_BLX)) &&
2772 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2773 {
2774 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2775 }
2776 else
2777 {
2778 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2779 }
2780 command_print(cmd_ctx, "%s", instruction.text);
2781 }
2782
2783 rollover = 0;
2784 }
2785
2786 if (next_pc_ok)
2787 {
2788 xscale->trace.current_pc = next_pc;
2789 xscale->trace.pc_ok = 1;
2790 }
2791 }
2792
2793 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2794 {
2795 struct arm_instruction instruction;
2796 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2797 {
2798 /* can't continue tracing with no image available */
2799 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2800 {
2801 return retval;
2802 }
2803 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2804 {
2805 /* TODO: handle incomplete images */
2806 }
2807 }
2808 command_print(cmd_ctx, "%s", instruction.text);
2809 }
2810
2811 trace_data = trace_data->next;
2812 }
2813
2814 return ERROR_OK;
2815 }
2816
2817 static const struct reg_arch_type xscale_reg_type = {
2818 .get = xscale_get_reg,
2819 .set = xscale_set_reg,
2820 };
2821
2822 static void xscale_build_reg_cache(struct target *target)
2823 {
2824 struct xscale_common *xscale = target_to_xscale(target);
2825 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2826 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2827 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2828 int i;
2829 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(struct xscale_reg);
2830
2831 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2832 armv4_5->core_cache = (*cache_p);
2833
2834 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2835 cache_p = &(*cache_p)->next;
2836
2837 /* fill in values for the xscale reg cache */
2838 (*cache_p)->name = "XScale registers";
2839 (*cache_p)->next = NULL;
2840 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2841 (*cache_p)->num_regs = num_regs;
2842
2843 for (i = 0; i < num_regs; i++)
2844 {
2845 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2846 (*cache_p)->reg_list[i].value = calloc(4, 1);
2847 (*cache_p)->reg_list[i].dirty = 0;
2848 (*cache_p)->reg_list[i].valid = 0;
2849 (*cache_p)->reg_list[i].size = 32;
2850 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2851 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2852 arch_info[i] = xscale_reg_arch_info[i];
2853 arch_info[i].target = target;
2854 }
2855
2856 xscale->reg_cache = (*cache_p);
2857 }
2858
2859 static int xscale_init_target(struct command_context *cmd_ctx,
2860 struct target *target)
2861 {
2862 xscale_build_reg_cache(target);
2863 return ERROR_OK;
2864 }
2865
2866 static int xscale_init_arch_info(struct target *target,
2867 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2868 {
2869 struct arm *armv4_5;
2870 uint32_t high_reset_branch, low_reset_branch;
2871 int i;
2872
2873 armv4_5 = &xscale->armv4_5_common;
2874
2875	/* store architecture specific data (none so far) */
2876 xscale->common_magic = XSCALE_COMMON_MAGIC;
2877
2878 /* we don't really *need* variant info ... */
2879 if (variant) {
2880 int ir_length = 0;
2881
2882 if (strcmp(variant, "pxa250") == 0
2883 || strcmp(variant, "pxa255") == 0
2884 || strcmp(variant, "pxa26x") == 0)
2885 ir_length = 5;
2886 else if (strcmp(variant, "pxa27x") == 0
2887 || strcmp(variant, "ixp42x") == 0
2888 || strcmp(variant, "ixp45x") == 0
2889 || strcmp(variant, "ixp46x") == 0)
2890 ir_length = 7;
2891 else
2892 LOG_WARNING("%s: unrecognized variant %s",
2893 tap->dotted_name, variant);
2894
2895 if (ir_length && ir_length != tap->ir_length) {
2896 LOG_WARNING("%s: IR length for %s is %d; fixing",
2897 tap->dotted_name, variant, ir_length);
2898 tap->ir_length = ir_length;
2899 }
2900 }
2901
2902 /* the debug handler isn't installed (and thus not running) at this time */
2903 xscale->handler_address = 0xfe000800;
2904
2905 /* clear the vectors we keep locally for reference */
2906 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2907 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2908
2909 /* no user-specified vectors have been configured yet */
2910 xscale->static_low_vectors_set = 0x0;
2911 xscale->static_high_vectors_set = 0x0;
2912
2913 /* calculate branches to debug handler */
2914 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2915 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2916
2917 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2918 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2919
2920 for (i = 1; i <= 7; i++)
2921 {
2922 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2923 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2924 }
2925
2926 /* 64kB aligned region used for DCache cleaning */
2927 xscale->cache_clean_address = 0xfffe0000;
2928
2929 xscale->hold_rst = 0;
2930 xscale->external_debug_break = 0;
2931
2932 xscale->ibcr_available = 2;
2933 xscale->ibcr0_used = 0;
2934 xscale->ibcr1_used = 0;
2935
2936 xscale->dbr_available = 2;
2937 xscale->dbr0_used = 0;
2938 xscale->dbr1_used = 0;
2939
2940 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2941 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2942
2943 xscale->vector_catch = 0x1;
2944
2945 xscale->trace.capture_status = TRACE_IDLE;
2946 xscale->trace.data = NULL;
2947 xscale->trace.image = NULL;
2948 xscale->trace.buffer_enabled = 0;
2949 xscale->trace.buffer_fill = 0;
2950
2951 /* prepare ARMv4/5 specific information */
2952 armv4_5->arch_info = xscale;
2953 armv4_5->read_core_reg = xscale_read_core_reg;
2954 armv4_5->write_core_reg = xscale_write_core_reg;
2955 armv4_5->full_context = xscale_full_context;
2956
2957 armv4_5_init_arch_info(target, armv4_5);
2958
2959 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
2960 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
2961 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
2962 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
2963 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
2964 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
2965 xscale->armv4_5_mmu.has_tiny_pages = 1;
2966 xscale->armv4_5_mmu.mmu_enabled = 0;
2967
2968 return ERROR_OK;
2969 }
2970
2971 static int xscale_target_create(struct target *target, Jim_Interp *interp)
2972 {
2973 struct xscale_common *xscale;
2974
2975 if (sizeof xscale_debug_handler - 1 > 0x800) {
2976 LOG_ERROR("debug_handler.bin: larger than 2kb");
2977 return ERROR_FAIL;
2978 }
2979
2980 xscale = calloc(1, sizeof(*xscale));
2981 if (!xscale)
2982 return ERROR_FAIL;
2983
2984 return xscale_init_arch_info(target, xscale, target->tap,
2985 target->variant);
2986 }
2987
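/* Typical configuration, usually issued from a board config file before
 * the target is examined (the target number and address used here are
 * only illustrative):
 *
 *   xscale debug_handler 0 0xfe000800
 *
 * The address must fall in one of the two ranges checked below.
 */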
2988 COMMAND_HANDLER(xscale_handle_debug_handler_command)
2989 {
2990 struct target *target = NULL;
2991 struct xscale_common *xscale;
2992 int retval;
2993 uint32_t handler_address;
2994
2995 if (CMD_ARGC < 2)
2996 {
2997 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
2998 return ERROR_OK;
2999 }
3000
3001 if ((target = get_target(CMD_ARGV[0])) == NULL)
3002 {
3003 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3004 return ERROR_FAIL;
3005 }
3006
3007 xscale = target_to_xscale(target);
3008 retval = xscale_verify_pointer(CMD_CTX, xscale);
3009 if (retval != ERROR_OK)
3010 return retval;
3011
3012 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3013
3014 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3015 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3016 {
3017 xscale->handler_address = handler_address;
3018 }
3019 else
3020 {
3021 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3022 return ERROR_FAIL;
3023 }
3024
3025 return ERROR_OK;
3026 }
3027
3028 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3029 {
3030 struct target *target = NULL;
3031 struct xscale_common *xscale;
3032 int retval;
3033 uint32_t cache_clean_address;
3034
3035 if (CMD_ARGC < 2)
3036 {
3037 return ERROR_COMMAND_SYNTAX_ERROR;
3038 }
3039
3040 target = get_target(CMD_ARGV[0]);
3041 if (target == NULL)
3042 {
3043 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3044 return ERROR_FAIL;
3045 }
3046 xscale = target_to_xscale(target);
3047 retval = xscale_verify_pointer(CMD_CTX, xscale);
3048 if (retval != ERROR_OK)
3049 return retval;
3050
3051 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3052
3053 if (cache_clean_address & 0xffff)
3054 {
3055 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3056 }
3057 else
3058 {
3059 xscale->cache_clean_address = cache_clean_address;
3060 }
3061
3062 return ERROR_OK;
3063 }
3064
3065 COMMAND_HANDLER(xscale_handle_cache_info_command)
3066 {
3067 struct target *target = get_current_target(CMD_CTX);
3068 struct xscale_common *xscale = target_to_xscale(target);
3069 int retval;
3070
3071 retval = xscale_verify_pointer(CMD_CTX, xscale);
3072 if (retval != ERROR_OK)
3073 return retval;
3074
3075 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3076 }
3077
3078 static int xscale_virt2phys(struct target *target,
3079 uint32_t virtual, uint32_t *physical)
3080 {
3081 struct xscale_common *xscale = target_to_xscale(target);
3082 int type;
3083 uint32_t cb;
3084 int domain;
3085 uint32_t ap;
3086
3087 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3088 LOG_ERROR(xscale_not);
3089 return ERROR_TARGET_INVALID;
3090 }
3091
3092 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3093 if (type == -1)
3094 {
3095 return ret;
3096 }
3097 *physical = ret;
3098 return ERROR_OK;
3099 }
3100
3101 static int xscale_mmu(struct target *target, int *enabled)
3102 {
3103 struct xscale_common *xscale = target_to_xscale(target);
3104
3105 if (target->state != TARGET_HALTED)
3106 {
3107 LOG_ERROR("Target not halted");
3108 return ERROR_TARGET_INVALID;
3109 }
3110 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3111 return ERROR_OK;
3112 }
3113
3114 COMMAND_HANDLER(xscale_handle_mmu_command)
3115 {
3116 struct target *target = get_current_target(CMD_CTX);
3117 struct xscale_common *xscale = target_to_xscale(target);
3118 int retval;
3119
3120 retval = xscale_verify_pointer(CMD_CTX, xscale);
3121 if (retval != ERROR_OK)
3122 return retval;
3123
3124 if (target->state != TARGET_HALTED)
3125 {
3126 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3127 return ERROR_OK;
3128 }
3129
3130 if (CMD_ARGC >= 1)
3131 {
3132 bool enable;
3133 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3134 if (enable)
3135 xscale_enable_mmu_caches(target, 1, 0, 0);
3136 else
3137 xscale_disable_mmu_caches(target, 1, 0, 0);
3138 xscale->armv4_5_mmu.mmu_enabled = enable;
3139 }
3140
3141 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3142
3143 return ERROR_OK;
3144 }
3145
3146 COMMAND_HANDLER(xscale_handle_idcache_command)
3147 {
3148 struct target *target = get_current_target(CMD_CTX);
3149 struct xscale_common *xscale = target_to_xscale(target);
3150
3151 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3152 if (retval != ERROR_OK)
3153 return retval;
3154
3155 if (target->state != TARGET_HALTED)
3156 {
3157 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3158 return ERROR_OK;
3159 }
3160
3161 bool icache;
3162 COMMAND_PARSE_BOOL(CMD_NAME, icache, "icache", "dcache");
3163
3164 if (CMD_ARGC >= 1)
3165 {
3166 bool enable;
3167 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3168 if (enable)
3169			xscale_enable_mmu_caches(target, 0, icache ? 0 : 1, icache ? 1 : 0);
3170 else
3171			xscale_disable_mmu_caches(target, 0, icache ? 0 : 1, icache ? 1 : 0);
3172 if (icache)
3173 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3174 else
3175 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3176 }
3177
3178 bool enabled = icache ?
3179 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3180 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3181 const char *msg = enabled ? "enabled" : "disabled";
3182 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3183
3184 return ERROR_OK;
3185 }
3186
3187 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3188 {
3189 struct target *target = get_current_target(CMD_CTX);
3190 struct xscale_common *xscale = target_to_xscale(target);
3191 int retval;
3192
3193 retval = xscale_verify_pointer(CMD_CTX, xscale);
3194 if (retval != ERROR_OK)
3195 return retval;
3196
3197 if (CMD_ARGC < 1)
3198 {
3199 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3200 }
3201 else
3202 {
3203 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3204 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3205 xscale_write_dcsr(target, -1, -1);
3206 }
3207
3208 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3209
3210 return ERROR_OK;
3211 }
3212
3213
3214 COMMAND_HANDLER(xscale_handle_vector_table_command)
3215 {
3216 struct target *target = get_current_target(CMD_CTX);
3217 struct xscale_common *xscale = target_to_xscale(target);
3218 int err = 0;
3219 int retval;
3220
3221 retval = xscale_verify_pointer(CMD_CTX, xscale);
3222 if (retval != ERROR_OK)
3223 return retval;
3224
3225 if (CMD_ARGC == 0) /* print current settings */
3226 {
3227 int idx;
3228
3229 command_print(CMD_CTX, "active user-set static vectors:");
3230 for (idx = 1; idx < 8; idx++)
3231 if (xscale->static_low_vectors_set & (1 << idx))
3232 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3233 for (idx = 1; idx < 8; idx++)
3234 if (xscale->static_high_vectors_set & (1 << idx))
3235 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3236 return ERROR_OK;
3237 }
3238
3239 if (CMD_ARGC != 3)
3240 err = 1;
3241 else
3242 {
3243 int idx;
3244 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3245 uint32_t vec;
3246 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3247
3248 if (idx < 1 || idx >= 8)
3249 err = 1;
3250
3251 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3252 {
3253 xscale->static_low_vectors_set |= (1<<idx);
3254 xscale->static_low_vectors[idx] = vec;
3255 }
3256 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3257 {
3258 xscale->static_high_vectors_set |= (1<<idx);
3259 xscale->static_high_vectors[idx] = vec;
3260 }
3261 else
3262 err = 1;
3263 }
3264
3265 if (err)
3266 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3267
3268 return ERROR_OK;
3269 }
3270
3271
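/* Example usage (arguments follow the help text registered for this
 * command; the fill count is illustrative):
 *
 *   xscale trace_buffer enable fill 1
 *   xscale trace_buffer disable
 */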
3272 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3273 {
3274 struct target *target = get_current_target(CMD_CTX);
3275 struct xscale_common *xscale = target_to_xscale(target);
3276 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
3277 uint32_t dcsr_value;
3278 int retval;
3279
3280 retval = xscale_verify_pointer(CMD_CTX, xscale);
3281 if (retval != ERROR_OK)
3282 return retval;
3283
3284 if (target->state != TARGET_HALTED)
3285 {
3286 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3287 return ERROR_OK;
3288 }
3289
3290 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3291 {
3292 struct xscale_trace_data *td, *next_td;
3293 xscale->trace.buffer_enabled = 1;
3294
3295 /* free old trace data */
3296 td = xscale->trace.data;
3297 while (td)
3298 {
3299 next_td = td->next;
3300
3301 if (td->entries)
3302 free(td->entries);
3303 free(td);
3304 td = next_td;
3305 }
3306 xscale->trace.data = NULL;
3307 }
3308 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3309 {
3310 xscale->trace.buffer_enabled = 0;
3311 }
3312
3313 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3314 {
3315 uint32_t fill = 1;
3316 if (CMD_ARGC >= 3)
3317 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3318 xscale->trace.buffer_fill = fill;
3319 }
3320 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3321 {
3322 xscale->trace.buffer_fill = -1;
3323 }
3324
3325 if (xscale->trace.buffer_enabled)
3326 {
3327 /* if we enable the trace buffer in fill-once
3328 * mode we know the address of the first instruction */
3329 xscale->trace.pc_ok = 1;
3330 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3331 }
3332 else
3333 {
3334 /* otherwise the address is unknown, and we have no known good PC */
3335 xscale->trace.pc_ok = 0;
3336 }
3337
3338 command_print(CMD_CTX, "trace buffer %s (%s)",
3339 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3340 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3341
3342 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3343 if (xscale->trace.buffer_fill >= 0)
3344 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3345 else
3346 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3347
3348 return ERROR_OK;
3349 }
3350
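/* Example usage (the file name is illustrative):
 *
 *   xscale trace_image firmware.elf
 *
 * The loaded image is only consulted by 'xscale analyze_trace', which
 * uses it to look up the opcodes named in the trace.
 */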
3351 COMMAND_HANDLER(xscale_handle_trace_image_command)
3352 {
3353 struct target *target = get_current_target(CMD_CTX);
3354 struct xscale_common *xscale = target_to_xscale(target);
3355 int retval;
3356
3357 if (CMD_ARGC < 1)
3358 {
3359 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3360 return ERROR_OK;
3361 }
3362
3363 retval = xscale_verify_pointer(CMD_CTX, xscale);
3364 if (retval != ERROR_OK)
3365 return retval;
3366
3367 if (xscale->trace.image)
3368 {
3369 image_close(xscale->trace.image);
3370 free(xscale->trace.image);
3371 command_print(CMD_CTX, "previously loaded image found and closed");
3372 }
3373
3374 xscale->trace.image = malloc(sizeof(struct image));
3375 xscale->trace.image->base_address_set = 0;
3376 xscale->trace.image->start_address_set = 0;
3377
3378 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3379 if (CMD_ARGC >= 2)
3380 {
3381 xscale->trace.image->base_address_set = 1;
3382 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3383 }
3384 else
3385 {
3386 xscale->trace.image->base_address_set = 0;
3387 }
3388
3389 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3390 {
3391 free(xscale->trace.image);
3392 xscale->trace.image = NULL;
3393 return ERROR_OK;
3394 }
3395
3396 return ERROR_OK;
3397 }
3398
3399 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3400 {
3401 struct target *target = get_current_target(CMD_CTX);
3402 struct xscale_common *xscale = target_to_xscale(target);
3403 struct xscale_trace_data *trace_data;
3404 struct fileio file;
3405 int retval;
3406
3407 retval = xscale_verify_pointer(CMD_CTX, xscale);
3408 if (retval != ERROR_OK)
3409 return retval;
3410
3411 if (target->state != TARGET_HALTED)
3412 {
3413 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3414 return ERROR_OK;
3415 }
3416
3417 if (CMD_ARGC < 1)
3418 {
3419 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3420 return ERROR_OK;
3421 }
3422
3423 trace_data = xscale->trace.data;
3424
3425 if (!trace_data)
3426 {
3427 command_print(CMD_CTX, "no trace data collected");
3428 return ERROR_OK;
3429 }
3430
3431 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3432 {
3433 return ERROR_OK;
3434 }
3435
3436 while (trace_data)
3437 {
3438 int i;
3439
3440 fileio_write_u32(&file, trace_data->chkpt0);
3441 fileio_write_u32(&file, trace_data->chkpt1);
3442 fileio_write_u32(&file, trace_data->last_instruction);
3443 fileio_write_u32(&file, trace_data->depth);
3444
3445 for (i = 0; i < trace_data->depth; i++)
3446 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3447
3448 trace_data = trace_data->next;
3449 }
3450
3451 fileio_close(&file);
3452
3453 return ERROR_OK;
3454 }
3455
3456 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3457 {
3458 struct target *target = get_current_target(CMD_CTX);
3459 struct xscale_common *xscale = target_to_xscale(target);
3460 int retval;
3461
3462 retval = xscale_verify_pointer(CMD_CTX, xscale);
3463 if (retval != ERROR_OK)
3464 return retval;
3465
3466 xscale_analyze_trace(target, CMD_CTX);
3467
3468 return ERROR_OK;
3469 }
3470
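/* Example usage (register numbers follow the cp15 mapping in the switch
 * below; the value written is illustrative):
 *
 *   xscale cp15 1            ;# read the control register
 *   xscale cp15 2 0xa0004000 ;# write the translation table base
 */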
3471 COMMAND_HANDLER(xscale_handle_cp15)
3472 {
3473 struct target *target = get_current_target(CMD_CTX);
3474 struct xscale_common *xscale = target_to_xscale(target);
3475 int retval;
3476
3477 retval = xscale_verify_pointer(CMD_CTX, xscale);
3478 if (retval != ERROR_OK)
3479 return retval;
3480
3481 if (target->state != TARGET_HALTED)
3482 {
3483 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3484 return ERROR_OK;
3485 }
3486 uint32_t reg_no = 0;
3487 struct reg *reg = NULL;
3488 if (CMD_ARGC > 0)
3489 {
3490 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3491 /*translate from xscale cp15 register no to openocd register*/
3492 switch (reg_no)
3493 {
3494 case 0:
3495 reg_no = XSCALE_MAINID;
3496 break;
3497 case 1:
3498 reg_no = XSCALE_CTRL;
3499 break;
3500 case 2:
3501 reg_no = XSCALE_TTB;
3502 break;
3503 case 3:
3504 reg_no = XSCALE_DAC;
3505 break;
3506 case 5:
3507 reg_no = XSCALE_FSR;
3508 break;
3509 case 6:
3510 reg_no = XSCALE_FAR;
3511 break;
3512 case 13:
3513 reg_no = XSCALE_PID;
3514 break;
3515 case 15:
3516 reg_no = XSCALE_CPACCESS;
3517 break;
3518 default:
3519 command_print(CMD_CTX, "invalid register number");
3520 return ERROR_INVALID_ARGUMENTS;
3521 }
3522 reg = &xscale->reg_cache->reg_list[reg_no];
3523
3524 }
3525 if (CMD_ARGC == 1)
3526 {
3527 uint32_t value;
3528
3529 /* read cp15 control register */
3530 xscale_get_reg(reg);
3531 value = buf_get_u32(reg->value, 0, 32);
3532 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3533 }
3534 else if (CMD_ARGC == 2)
3535 {
3536 uint32_t value;
3537 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3538
3539 /* send CP write request (command 0x41) */
3540 xscale_send_u32(target, 0x41);
3541
3542 /* send CP register number */
3543 xscale_send_u32(target, reg_no);
3544
3545 /* send CP register value */
3546 xscale_send_u32(target, value);
3547
3548 /* execute cpwait to ensure outstanding operations complete */
3549 xscale_send_u32(target, 0x53);
3550 }
3551 else
3552 {
3553		command_print(CMD_CTX, "usage: cp15 <register> [value]");
3554 }
3555
3556 return ERROR_OK;
3557 }
3558
3559 static int xscale_register_commands(struct command_context *cmd_ctx)
3560 {
3561 struct command *xscale_cmd;
3562
3563 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3564
3565 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
3566 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3567
3568 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3569 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3570 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3571 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3572
3573	register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be caught");
3574 register_command(cmd_ctx, xscale_cmd, "vector_table", xscale_handle_vector_table_command, COMMAND_EXEC, "<high|low> <index> <code> set static code for exception handler entry");
3575
3576 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable | disable> ['fill' [n]|'wrap']");
3577
3578 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3579 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3580 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3581 COMMAND_EXEC, "load image from <file> [base address]");
3582
3583 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3584
3585 armv4_5_register_commands(cmd_ctx);
3586
3587 return ERROR_OK;
3588 }
3589
3590 struct target_type xscale_target =
3591 {
3592 .name = "xscale",
3593
3594 .poll = xscale_poll,
3595 .arch_state = xscale_arch_state,
3596
3597 .target_request_data = NULL,
3598
3599 .halt = xscale_halt,
3600 .resume = xscale_resume,
3601 .step = xscale_step,
3602
3603 .assert_reset = xscale_assert_reset,
3604 .deassert_reset = xscale_deassert_reset,
3605 .soft_reset_halt = NULL,
3606
3607 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
3608
3609 .read_memory = xscale_read_memory,
3610 .write_memory = xscale_write_memory,
3611 .bulk_write_memory = xscale_bulk_write_memory,
3612
3613 .checksum_memory = arm_checksum_memory,
3614 .blank_check_memory = arm_blank_check_memory,
3615
3616 .run_algorithm = armv4_5_run_algorithm,
3617
3618 .add_breakpoint = xscale_add_breakpoint,
3619 .remove_breakpoint = xscale_remove_breakpoint,
3620 .add_watchpoint = xscale_add_watchpoint,
3621 .remove_watchpoint = xscale_remove_watchpoint,
3622
3623 .register_commands = xscale_register_commands,
3624 .target_create = xscale_target_create,
3625 .init_target = xscale_init_target,
3626
3627 .virt2phys = xscale_virt2phys,
3628 .mmu = xscale_mmu
3629 };
