ARM: simplify CPSR handling
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include "time_support.h"
37 #include "register.h"
38 #include "image.h"
39
40
41 /*
42 * Important XScale documents available as of October 2009 include:
43 *
44 * Intel XScale® Core Developer’s Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
48 *
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
56 *
57 * Chip-specific microarchitecture documents may also be useful.
58 */
59
60
61 /* forward declarations */
62 static int xscale_resume(struct target *, int current,
63 uint32_t address, int handle_breakpoints, int debug_execution);
64 static int xscale_debug_entry(struct target *);
65 static int xscale_restore_context(struct target *);
66 static int xscale_get_reg(struct reg *reg);
67 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
68 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
69 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
70 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_read_trace(struct target *);
72
73
74 /* This XScale "debug handler" is loaded into the processor's
75 * mini-ICache, which is 2K of code writable only via JTAG.
76 *
77 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
78 * binary files cleanly. It's string oriented, and terminates them
79 * with a NUL character. Better would be to generate the constants
80 * and let other code decide names, scoping, and other housekeeping.
81 */
82 static /* unsigned const char xscale_debug_handler[] = ... */
83 #include "xscale_debug.h"
84
/* Human-readable names for the XScale-specific registers exposed through
 * the register cache.  Index order must match xscale_reg_arch_info[] below
 * (and, presumably, the XSCALE_* register enum — confirm against xscale.h).
 */
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID",		/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",			/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",			/* 20 */
	"XSCALE_TXRXCTRL",
};
110
/* Per-register architecture info; entry order parallels xscale_reg_list[].
 * The first field is the CP15/debug register number used when the register
 * is accessed through the debug handler; -1 marks registers that are only
 * reachable through dedicated JTAG data registers (TX/RX/TXRXCTRL).
 */
static const struct xscale_reg xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
	{-1, NULL}, /* TX accessed via JTAG */
	{-1, NULL}, /* RX accessed via JTAG */
	{-1, NULL}, /* TXRXCTRL implicit access via JTAG */
};
136
/* Convenience wrapper: store a host uint32_t into a register's value
 * buffer (LSB first) and push it to the target via xscale_set_reg(). */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t value_buf[4];

	buf_set_u32(value_buf, 0, 32, value);
	return xscale_set_reg(reg, value_buf);
}
146
147 static const char xscale_not[] = "target is not an XScale";
148
149 static int xscale_verify_pointer(struct command_context *cmd_ctx,
150 struct xscale_common *xscale)
151 {
152 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
153 command_print(cmd_ctx, xscale_not);
154 return ERROR_TARGET_INVALID;
155 }
156 return ERROR_OK;
157 }
158
/* Queue an IR scan selecting the given XScale JTAG instruction, skipping
 * the scan when the TAP already holds that instruction.  The scan is only
 * queued here; the caller must eventually run the JTAG queue.
 */
static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
{
	if (tap == NULL)
		return ERROR_FAIL;

	/* avoid a redundant IR scan if the instruction is already current */
	if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
	{
		struct scan_field field;
		uint8_t scratch[4];

		memset(&field, 0, sizeof field);
		field.tap = tap;
		field.num_bits = tap->ir_length;
		field.out_value = scratch;
		buf_set_u32(field.out_value, 0, field.num_bits, new_instr);

		jtag_add_ir_scan(1, &field, jtag_get_end_state());
	}

	return ERROR_OK;
}
180
/* Read the Debug Control and Status Register through the SELDCSR JTAG
 * data register, update the cached copy, then scan the same value back
 * out in a second pass.  The second pass also (re)applies the cached
 * hold_rst and external_debug_break bits carried in the 3-bit control
 * field.  Returns an OpenOCD error code.
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_DRPAUSE);
	xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);

	/* control field: bit 1 = hold reset, bit 2 = external debug break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* verify the captured handshake bits look sane before trusting data */
	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	/* cache now mirrors the hardware value */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_set_end_state(TAP_IDLE);

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
246
247
/* JTAG queue callback: convert a freshly scanned 32-bit value (stored
 * LSB-first in the scan buffer) into a host-order uint32_t, in place.
 * 'arg' points into a uint32_t array (see xscale_receive), so the
 * in-place store is properly aligned. */
static void xscale_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;
	*((uint32_t *)in) = buf_get_u32(in, 0, 32);
}
253
254 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
255 {
256 if (num_words == 0)
257 return ERROR_INVALID_ARGUMENTS;
258
259 int retval = ERROR_OK;
260 tap_state_t path[3];
261 struct scan_field fields[3];
262 uint8_t *field0 = malloc(num_words * 1);
263 uint8_t field0_check_value = 0x2;
264 uint8_t field0_check_mask = 0x6;
265 uint32_t *field1 = malloc(num_words * 4);
266 uint8_t field2_check_value = 0x0;
267 uint8_t field2_check_mask = 0x1;
268 int words_done = 0;
269 int words_scheduled = 0;
270 int i;
271
272 path[0] = TAP_DRSELECT;
273 path[1] = TAP_DRCAPTURE;
274 path[2] = TAP_DRSHIFT;
275
276 memset(&fields, 0, sizeof fields);
277
278 fields[0].tap = target->tap;
279 fields[0].num_bits = 3;
280 fields[0].check_value = &field0_check_value;
281 fields[0].check_mask = &field0_check_mask;
282
283 fields[1].tap = target->tap;
284 fields[1].num_bits = 32;
285
286 fields[2].tap = target->tap;
287 fields[2].num_bits = 1;
288 fields[2].check_value = &field2_check_value;
289 fields[2].check_mask = &field2_check_mask;
290
291 jtag_set_end_state(TAP_IDLE);
292 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
293 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
294
295 /* repeat until all words have been collected */
296 int attempts = 0;
297 while (words_done < num_words)
298 {
299 /* schedule reads */
300 words_scheduled = 0;
301 for (i = words_done; i < num_words; i++)
302 {
303 fields[0].in_value = &field0[i];
304
305 jtag_add_pathmove(3, path);
306
307 fields[1].in_value = (uint8_t *)(field1 + i);
308
309 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
310
311 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
312
313 words_scheduled++;
314 }
315
316 if ((retval = jtag_execute_queue()) != ERROR_OK)
317 {
318 LOG_ERROR("JTAG error while receiving data from debug handler");
319 break;
320 }
321
322 /* examine results */
323 for (i = words_done; i < num_words; i++)
324 {
325 if (!(field0[0] & 1))
326 {
327 /* move backwards if necessary */
328 int j;
329 for (j = i; j < num_words - 1; j++)
330 {
331 field0[j] = field0[j + 1];
332 field1[j] = field1[j + 1];
333 }
334 words_scheduled--;
335 }
336 }
337 if (words_scheduled == 0)
338 {
339 if (attempts++==1000)
340 {
341 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
342 retval = ERROR_TARGET_TIMEOUT;
343 break;
344 }
345 }
346
347 words_done += words_scheduled;
348 }
349
350 for (i = 0; i < num_words; i++)
351 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
352
353 free(field1);
354
355 return retval;
356 }
357
/* Poll the debug handler's TX register until it has valid data or one
 * second elapses.  When 'consume' is set the TAP path goes straight from
 * Capture-DR to Shift-DR, which clears TX_READY; otherwise a detour
 * through Pause-DR leaves the register content intact (peek).
 *
 * Returns ERROR_OK with the value latched into the XSCALE_TX cache entry,
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no data was ready (only
 * reachable in non-consume mode), or ERROR_TARGET_TIMEOUT on JTAG error
 * or timeout.
 */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);

	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* one second poll budget */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* only keep polling when consuming and no data was ready;
		 * a non-consuming peek returns immediately either way */
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* bit 0 of the handshake field is TX valid */
	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
455
/* Write the cached XSCALE_RX value to the debug handler's RX register.
 * Polls (for up to one second) until the handler has drained any previous
 * word (handshake bit 0 clear), then performs one final scan with the
 * rx_valid flag set so the handler picks up the new word.
 */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* one second poll budget */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* previous word has been consumed by the handler */
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
540
/* send count elements of size byte to the debug handler */
/* Bulk download path: each element from 'buffer' is widened to a host
 * uint32_t (honoring the target's endianness for 2- and 4-byte sizes)
 * and queued as one DBGRX scan.  Unlike xscale_write_rx() there is no
 * per-word handshake poll; all scans are queued and executed at once,
 * which assumes the handler drains RX fast enough — NOTE(review):
 * relies on that protocol property, confirm against the debug handler.
 * Returns ERROR_INVALID_ARGUMENTS for unsupported sizes, otherwise
 * the JTAG queue result.
 */
static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
{
	uint32_t t[3];
	int bits[3];
	int retval;
	int done_count = 0;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);

	/* field layout mirrors the 3-field DBGRX scan: 3 handshake bits,
	 * 32 data bits, 1 rx_valid bit (always set here) */
	bits[0]=3;
	t[0]=0;
	bits[1]=32;
	t[2]=1;
	bits[2]=1;
	int endianness = target->endianness;
	while (done_count++ < count)
	{
		switch (size)
		{
		case 4:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u32(buffer);
			} else
			{
				t[1]=be_to_h_u32(buffer);
			}
			break;
		case 2:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u16(buffer);
			} else
			{
				t[1]=be_to_h_u16(buffer);
			}
			break;
		case 1:
			t[1]=buffer[0];
			break;
		default:
			LOG_ERROR("BUG: size neither 4, 2 nor 1");
			return ERROR_INVALID_ARGUMENTS;
		}
		jtag_add_dr_out(target->tap,
				3,
				bits,
				t,
				jtag_set_end_state(TAP_IDLE));
		buffer += size;
	}

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
604
605 static int xscale_send_u32(struct target *target, uint32_t value)
606 {
607 struct xscale_common *xscale = target_to_xscale(target);
608
609 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
610 return xscale_write_rx(target);
611 }
612
/* Write the cached DCSR value to the target via the SELDCSR JTAG data
 * register.  hold_rst / ext_dbg_brk update the corresponding cached
 * flags when not -1 (pass -1 to leave a flag unchanged); both flags are
 * carried in the 3-bit control field of the scan.
 */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);

	/* control field: bit 1 = hold reset, bit 2 = external debug break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* hardware now matches the cache */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
671
/* Parity of a 32-bit word: 0 if the number of set bits is even,
 * 1 if it is odd.  Folds the word down bit by bit with XOR. */
static unsigned int parity (unsigned int v)
{
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v ^= v >> 2;
	v ^= v >> 1;
	return v & 1;
}
683
/* Load one 8-word cacheline into the mini instruction cache at virtual
 * address 'va' (must be cacheline aligned; only va >> 5 is sent) using
 * the LDIC JTAG function.  The command packet is followed by eight
 * 32-bit instruction words, each accompanied by a parity bit.
 */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_LDIC);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].tap = target->tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	/* rest of packet is a cacheline: 8 instructions, with parity */
	/* the same two scan fields are reused: 32 data bits + 1 parity bit */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* parity is computed over the word; memcpy avoids an
		 * unaligned/aliasing read of the packet buffer */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(2, fields, jtag_get_end_state());
	}

	return jtag_execute_queue();
}
738
/* Queue an LDIC "invalidate IC line" command for the cacheline holding
 * virtual address 'va'.  Note: the scan is only queued here; it is run
 * by a later jtag_execute_queue() (e.g. inside xscale_load_ic).
 */
static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
{
	uint8_t packet[4];
	uint8_t cmd;
	struct scan_field fields[2];

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_LDIC);

	/* CMD for invalidate IC line b000, bits [6:4] b000 */
	buf_set_u32(&cmd, 0, 6, 0x0);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].tap = target->tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	return ERROR_OK;
}
768
769 static int xscale_update_vectors(struct target *target)
770 {
771 struct xscale_common *xscale = target_to_xscale(target);
772 int i;
773 int retval;
774
775 uint32_t low_reset_branch, high_reset_branch;
776
777 for (i = 1; i < 8; i++)
778 {
779 /* if there's a static vector specified for this exception, override */
780 if (xscale->static_high_vectors_set & (1 << i))
781 {
782 xscale->high_vectors[i] = xscale->static_high_vectors[i];
783 }
784 else
785 {
786 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
787 if (retval == ERROR_TARGET_TIMEOUT)
788 return retval;
789 if (retval != ERROR_OK)
790 {
791 /* Some of these reads will fail as part of normal execution */
792 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
793 }
794 }
795 }
796
797 for (i = 1; i < 8; i++)
798 {
799 if (xscale->static_low_vectors_set & (1 << i))
800 {
801 xscale->low_vectors[i] = xscale->static_low_vectors[i];
802 }
803 else
804 {
805 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
806 if (retval == ERROR_TARGET_TIMEOUT)
807 return retval;
808 if (retval != ERROR_OK)
809 {
810 /* Some of these reads will fail as part of normal execution */
811 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
812 }
813 }
814 }
815
816 /* calculate branches to debug handler */
817 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
818 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
819
820 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
821 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
822
823 /* invalidate and load exception vectors in mini i-cache */
824 xscale_invalidate_ic_line(target, 0x0);
825 xscale_invalidate_ic_line(target, 0xffff0000);
826
827 xscale_load_ic(target, 0x0, xscale->low_vectors);
828 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
829
830 return ERROR_OK;
831 }
832
/* Report the halted target's architectural state to the user: core state,
 * debug reason, mode, CPSR, PC, and MMU/cache enables, plus an
 * XScale-specific annotation (reset / trace buffer full) when relevant.
 */
static int xscale_arch_state(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;

	/* indexed by the 0/1 enable flags from the MMU/cache state */
	static const char *state[] =
	{
		"disabled", "enabled"
	};

	/* indexed by enum xscale_debug_reason (generic/reset/trace-full) */
	static const char *arch_dbg_reason[] =
	{
		"", "\n(processor reset)", "\n(trace buffer full)"
	};

	if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
	{
		LOG_ERROR("BUG: called for a non-ARMv4/5 target");
		return ERROR_INVALID_ARGUMENTS;
	}

	LOG_USER("target halted in %s state due to %s, current mode: %s\n"
			"cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
			"MMU: %s, D-Cache: %s, I-Cache: %s"
			"%s",
			 armv4_5_state_strings[armv4_5->core_state],
			 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
			 arm_mode_name(armv4_5->core_mode),
			 buf_get_u32(armv4_5->cpsr->value, 0, 32),
			 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
			 state[xscale->armv4_5_mmu.mmu_enabled],
			 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
			 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
			 arch_dbg_reason[xscale->arch_debug_reason]);

	return ERROR_OK;
}
870
/* Poll a running target for debug entry.  A readable (non-consumed) TX
 * word means the debug handler is live, i.e. the core entered debug
 * state; in that case the full debug-entry sequence is performed and
 * halt events are signalled.
 */
static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
	{
		enum target_state previous_state = target->state;
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
		{

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		}
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		{
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be perfomed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
910
/* Handle entry into debug state: drain the register dump the debug
 * handler sends over TX (r0, pc, r1-r7, cpsr, then the banked r8-r14
 * and spsr for the entered mode), populate the register cache, decode
 * the DCSR "method of entry" into a debug reason, apply the PC fixup,
 * refresh MMU/cache state, and collect trace data if tracing is on.
 */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
	uint32_t pc;
	uint32_t buffer[10];
	int i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
	armv4_5->core_cache->reg_list[15].dirty = 1;
	armv4_5->core_cache->reg_list[15].valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	/* r1..r7 follow pc in the handler's dump, hence buffer[1 + i] */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	buf_set_u32(armv4_5->cpsr->value, 0, 32, buffer[9]);
	armv4_5->cpsr->dirty = 1;
	armv4_5->cpsr->valid = 1;
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* mode is CPSR[4:0]; an invalid mode means the dump is garbage */
	armv4_5->core_mode = buffer[9] & 0x1f;
	if (!is_arm_mode(armv4_5->core_mode))
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
			arm_mode_name(armv4_5->core_mode));

	/* CPSR T bit selects Thumb vs ARM state */
	if (buffer[9] & 0x20)
		armv4_5->core_state = ARMV4_5_STATE_THUMB;
	else
		armv4_5->core_state = ARMV4_5_STATE_ARM;


	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
	{
		xscale_receive(target, buffer, 8);
		buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to register cache */
	for (i = 8; i <= 14; i++)
	{
		buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
	}

	/* examine debug reason */
	/* DCSR[4:2] is the Method Of Entry (MOE) field */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	switch (moe)
	{
		case 0x0: /* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1: /* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2: /* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3: /* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4: /* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5: /* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6: /* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7: /* Reserved (may flag Hot-Debug support) */
		default:
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	/* the saved PC points one instruction past the debug-entry point */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.buffer_enabled)
	{
		xscale_read_trace(target);
		xscale->trace.buffer_fill--;

		/* resume if we're still collecting trace data */
		if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
			&& (xscale->trace.buffer_fill > 0))
		{
			xscale_resume(target, 1, 0x0, 1, 0);
		}
		else
		{
			xscale->trace.buffer_enabled = 0;
		}
	}

	return ERROR_OK;
}
1088
1089 static int xscale_halt(struct target *target)
1090 {
1091 struct xscale_common *xscale = target_to_xscale(target);
1092
1093 LOG_DEBUG("target->state: %s",
1094 target_state_name(target));
1095
1096 if (target->state == TARGET_HALTED)
1097 {
1098 LOG_DEBUG("target was already halted");
1099 return ERROR_OK;
1100 }
1101 else if (target->state == TARGET_UNKNOWN)
1102 {
1103 /* this must not happen for a xscale target */
1104 LOG_ERROR("target was in unknown state when halt was requested");
1105 return ERROR_TARGET_INVALID;
1106 }
1107 else if (target->state == TARGET_RESET)
1108 {
1109 LOG_DEBUG("target->state == TARGET_RESET");
1110 }
1111 else
1112 {
1113 /* assert external dbg break */
1114 xscale->external_debug_break = 1;
1115 xscale_read_dcsr(target);
1116
1117 target->debug_reason = DBG_REASON_DBGRQ;
1118 }
1119
1120 return ERROR_OK;
1121 }
1122
1123 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1124 {
1125 struct xscale_common *xscale = target_to_xscale(target);
1126 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1127 int retval;
1128
1129 if (xscale->ibcr0_used)
1130 {
1131 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1132
1133 if (ibcr0_bp)
1134 {
1135 xscale_unset_breakpoint(target, ibcr0_bp);
1136 }
1137 else
1138 {
1139 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1140 exit(-1);
1141 }
1142 }
1143
1144 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1145 return retval;
1146
1147 return ERROR_OK;
1148 }
1149
1150 static int xscale_disable_single_step(struct target *target)
1151 {
1152 struct xscale_common *xscale = target_to_xscale(target);
1153 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1154 int retval;
1155
1156 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1157 return retval;
1158
1159 return ERROR_OK;
1160 }
1161
1162 static void xscale_enable_watchpoints(struct target *target)
1163 {
1164 struct watchpoint *watchpoint = target->watchpoints;
1165
1166 while (watchpoint)
1167 {
1168 if (watchpoint->set == 0)
1169 xscale_set_watchpoint(target, watchpoint);
1170 watchpoint = watchpoint->next;
1171 }
1172 }
1173
1174 static void xscale_enable_breakpoints(struct target *target)
1175 {
1176 struct breakpoint *breakpoint = target->breakpoints;
1177
1178 /* set any pending breakpoints */
1179 while (breakpoint)
1180 {
1181 if (breakpoint->set == 0)
1182 xscale_set_breakpoint(target, breakpoint);
1183 breakpoint = breakpoint->next;
1184 }
1185 }
1186
/* Resume execution on a halted XScale target.
 *
 * current            1: resume at the cached PC; 0: resume at <address>
 * handle_breakpoints if set, transparently single-step over a breakpoint
 *                    placed at the resume PC before resuming for real
 * debug_execution    if set, run without freeing working areas and report
 *                    TARGET_DEBUG_RUNNING instead of TARGET_RUNNING
 *
 * Debug-handler protocol: command 0x30 resumes, 0x31 resumes with the
 * trace buffer active, and 0x62 (sent first) cleans the trace buffer.
 * After the command, CPSR, r0..r7 and the PC are downloaded; the handler
 * restores them before returning control to the application.
 *
 * Returns ERROR_OK, or an error code from vector-table update.
 */
static int xscale_resume(struct target *target, int current,
	uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
	struct breakpoint *breakpoint = target->breakpoints;
	uint32_t current_pc;
	int retval;
	int i;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution)
	{
		target_free_all_working_areas(target);
	}

	/* update vector tables */
	if ((retval = xscale_update_vectors(target)) != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0)
	{
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
		{
			uint32_t next_pc;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
			{
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
			}

			/* halt again right after the instruction at the breakpoint */
			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			xscale_restore_context(target);

			/* send resume request (command 0x30 or 0x31)
			 * clean the trace buffer if it is to be enabled (0x62) */
			if (xscale->trace.buffer_enabled)
			{
				xscale_send_u32(target, 0x62);
				xscale_send_u32(target, 0x31);
			}
			else
				xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target,
					buf_get_u32(armv4_5->cpsr->value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
					buf_get_u32(armv4_5->cpsr->value, 0, 32));

			/* download r7..r0 in the order the debug handler expects */
			for (i = 7; i >= 0; i--)
			{
				/* send register */
				xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

			/* wait for and process debug entry (the single step) */
			xscale_debug_entry(target);

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			/* put the user's breakpoint back before the real resume */
			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	xscale_restore_context(target);

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	}
	else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* download r7..r0 in the order the debug handler expects */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
	LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution)
	{
		/* registers are now invalid */
		register_cache_invalidate(armv4_5->core_cache);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}
1342
/* Execute exactly one instruction on the target.
 *
 * Helper for xscale_step(): assumes any breakpoint at the current PC has
 * already been removed by the caller.  The next PC is predicted with the
 * instruction simulator, a hardware single-step breakpoint (IBCR0) is
 * armed there, context is downloaded to the debug handler, the core is
 * resumed, and the resulting debug entry is processed.
 *
 * current/address/handle_breakpoints are accepted for signature symmetry
 * with xscale_resume() but are not used here.
 *
 * Returns ERROR_OK or the first failing step's error code.
 */
static int xscale_step_inner(struct target *target, int current,
	uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
	{
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
		return retval;
	}

	/* arm IBCR0 at the predicted next PC */
	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
		return retval;

	/* restore banked registers */
	if ((retval = xscale_restore_context(target)) != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
			return retval;
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
			return retval;
	}
	else
	if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
		return retval;

	/* send CPSR */
	retval = xscale_send_u32(target,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
			buf_get_u32(armv4_5->cpsr->value, 0, 32));

	/* download r7..r0 in the order the debug handler expects */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
		return retval;
	LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	/* wait for and process debug entry */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1424
1425 static int xscale_step(struct target *target, int current,
1426 uint32_t address, int handle_breakpoints)
1427 {
1428 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1429 struct breakpoint *breakpoint = target->breakpoints;
1430
1431 uint32_t current_pc;
1432 int retval;
1433
1434 if (target->state != TARGET_HALTED)
1435 {
1436 LOG_WARNING("target not halted");
1437 return ERROR_TARGET_NOT_HALTED;
1438 }
1439
1440 /* current = 1: continue on current pc, otherwise continue at <address> */
1441 if (!current)
1442 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1443
1444 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1445
1446 /* if we're at the reset vector, we have to simulate the step */
1447 if (current_pc == 0x0)
1448 {
1449 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1450 return retval;
1451 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1452
1453 target->debug_reason = DBG_REASON_SINGLESTEP;
1454 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1455
1456 return ERROR_OK;
1457 }
1458
1459 /* the front-end may request us not to handle breakpoints */
1460 if (handle_breakpoints)
1461 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1462 {
1463 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1464 return retval;
1465 }
1466
1467 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1468
1469 if (breakpoint)
1470 {
1471 xscale_set_breakpoint(target, breakpoint);
1472 }
1473
1474 LOG_DEBUG("target stepped");
1475
1476 return ERROR_OK;
1477
1478 }
1479
/* Assert SRST on the target while configuring the debug unit so the core
 * traps into the debug handler on reset release (Halt mode, Trap Reset).
 *
 * Returns ERROR_OK, or an error from target_halt() when reset_halt is
 * requested.
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		  target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, 0x7f);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1520
/* Release SRST and bring the target back under debug control.
 *
 * All hardware breakpoint/watchpoint bookkeeping is cleared (the debug
 * registers were reset), the register cache is invalidated, and the
 * debug handler binary is downloaded into the mini-instruction-cache so
 * that the trapped reset lands in "Special Debug State".  If reset_halt
 * is not requested, the target is transparently resumed afterwards.
 *
 * Returns ERROR_OK or an error from the mini-icache load.
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* debug registers were cleared by reset: both IBCRs and DBRs are free */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	register_cache_invalidate(xscale->armv4_5_common.core_cache);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
			binary_size > 0;
			binary_size -= buf_cnt, buffer += buf_cnt)
		{
			uint32_t cache_line[8];
			unsigned i;

			/* the mini-icache is loaded one 32-byte line at a time */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			/* pad a partial final line (0xe1a08008 = "mov r8, r8" no-op) */
			for (; i < 32; i += 4)
			{
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		};

		/* load the relocated exception vectors (low and high) */
		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1654
/* Read a single core register (unimplemented stub).
 *
 * Only logs an error; full-context reads go through xscale_full_context()
 * instead.  Returns ERROR_OK despite doing nothing.
 */
static int xscale_read_core_reg(struct target *target, struct reg *r,
	int num, enum armv4_5_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1662
/* Write a single core register (unimplemented stub).
 *
 * Only logs an error; register writes are flushed via
 * xscale_restore_context() instead.  Returns ERROR_OK despite doing nothing.
 */
static int xscale_write_core_reg(struct target *target, struct reg *r,
	int num, enum armv4_5_mode mode, uint32_t value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1670
1671 static int xscale_full_context(struct target *target)
1672 {
1673 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1674
1675 uint32_t *buffer;
1676
1677 int i, j;
1678
1679 LOG_DEBUG("-");
1680
1681 if (target->state != TARGET_HALTED)
1682 {
1683 LOG_WARNING("target not halted");
1684 return ERROR_TARGET_NOT_HALTED;
1685 }
1686
1687 buffer = malloc(4 * 8);
1688
1689 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1690 * we can't enter User mode on an XScale (unpredictable),
1691 * but User shares registers with SYS
1692 */
1693 for (i = 1; i < 7; i++)
1694 {
1695 int valid = 1;
1696
1697 /* check if there are invalid registers in the current mode
1698 */
1699 for (j = 0; j <= 16; j++)
1700 {
1701 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1702 valid = 0;
1703 }
1704
1705 if (!valid)
1706 {
1707 uint32_t tmp_cpsr;
1708
1709 /* request banked registers */
1710 xscale_send_u32(target, 0x0);
1711
1712 tmp_cpsr = 0x0;
1713 tmp_cpsr |= armv4_5_number_to_mode(i);
1714 tmp_cpsr |= 0xc0; /* I/F bits */
1715
1716 /* send CPSR for desired mode */
1717 xscale_send_u32(target, tmp_cpsr);
1718
1719 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1720 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1721 {
1722 xscale_receive(target, buffer, 8);
1723 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1724 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1725 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1726 }
1727 else
1728 {
1729 xscale_receive(target, buffer, 7);
1730 }
1731
1732 /* move data from buffer to register cache */
1733 for (j = 8; j <= 14; j++)
1734 {
1735 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1736 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1737 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1738 }
1739 }
1740 }
1741
1742 free(buffer);
1743
1744 return ERROR_OK;
1745 }
1746
1747 static int xscale_restore_context(struct target *target)
1748 {
1749 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1750
1751 int i, j;
1752
1753 if (target->state != TARGET_HALTED)
1754 {
1755 LOG_WARNING("target not halted");
1756 return ERROR_TARGET_NOT_HALTED;
1757 }
1758
1759 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1760 * we can't enter User mode on an XScale (unpredictable),
1761 * but User shares registers with SYS
1762 */
1763 for (i = 1; i < 7; i++)
1764 {
1765 int dirty = 0;
1766
1767 /* check if there are invalid registers in the current mode
1768 */
1769 for (j = 8; j <= 14; j++)
1770 {
1771 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1772 dirty = 1;
1773 }
1774
1775 /* if not USR/SYS, check if the SPSR needs to be written */
1776 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1777 {
1778 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1779 dirty = 1;
1780 }
1781
1782 if (dirty)
1783 {
1784 uint32_t tmp_cpsr;
1785
1786 /* send banked registers */
1787 xscale_send_u32(target, 0x1);
1788
1789 tmp_cpsr = 0x0;
1790 tmp_cpsr |= armv4_5_number_to_mode(i);
1791 tmp_cpsr |= 0xc0; /* I/F bits */
1792
1793 /* send CPSR for desired mode */
1794 xscale_send_u32(target, tmp_cpsr);
1795
1796 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1797 for (j = 8; j <= 14; j++)
1798 {
1799 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, j).value, 0, 32));
1800 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1801 }
1802
1803 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1804 {
1805 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32));
1806 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1807 }
1808 }
1809 }
1810
1811 return ERROR_OK;
1812 }
1813
1814 static int xscale_read_memory(struct target *target, uint32_t address,
1815 uint32_t size, uint32_t count, uint8_t *buffer)
1816 {
1817 struct xscale_common *xscale = target_to_xscale(target);
1818 uint32_t *buf32;
1819 uint32_t i;
1820 int retval;
1821
1822 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1823
1824 if (target->state != TARGET_HALTED)
1825 {
1826 LOG_WARNING("target not halted");
1827 return ERROR_TARGET_NOT_HALTED;
1828 }
1829
1830 /* sanitize arguments */
1831 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1832 return ERROR_INVALID_ARGUMENTS;
1833
1834 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1835 return ERROR_TARGET_UNALIGNED_ACCESS;
1836
1837 /* send memory read request (command 0x1n, n: access size) */
1838 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1839 return retval;
1840
1841 /* send base address for read request */
1842 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1843 return retval;
1844
1845 /* send number of requested data words */
1846 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1847 return retval;
1848
1849 /* receive data from target (count times 32-bit words in host endianness) */
1850 buf32 = malloc(4 * count);
1851 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1852 return retval;
1853
1854 /* extract data from host-endian buffer into byte stream */
1855 for (i = 0; i < count; i++)
1856 {
1857 switch (size)
1858 {
1859 case 4:
1860 target_buffer_set_u32(target, buffer, buf32[i]);
1861 buffer += 4;
1862 break;
1863 case 2:
1864 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1865 buffer += 2;
1866 break;
1867 case 1:
1868 *buffer++ = buf32[i] & 0xff;
1869 break;
1870 default:
1871 LOG_ERROR("invalid read size");
1872 return ERROR_INVALID_ARGUMENTS;
1873 }
1874 }
1875
1876 free(buf32);
1877
1878 /* examine DCSR, to see if Sticky Abort (SA) got set */
1879 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1880 return retval;
1881 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1882 {
1883 /* clear SA bit */
1884 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1885 return retval;
1886
1887 return ERROR_TARGET_DATA_ABORT;
1888 }
1889
1890 return ERROR_OK;
1891 }
1892
1893 static int xscale_write_memory(struct target *target, uint32_t address,
1894 uint32_t size, uint32_t count, uint8_t *buffer)
1895 {
1896 struct xscale_common *xscale = target_to_xscale(target);
1897 int retval;
1898
1899 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1900
1901 if (target->state != TARGET_HALTED)
1902 {
1903 LOG_WARNING("target not halted");
1904 return ERROR_TARGET_NOT_HALTED;
1905 }
1906
1907 /* sanitize arguments */
1908 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1909 return ERROR_INVALID_ARGUMENTS;
1910
1911 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1912 return ERROR_TARGET_UNALIGNED_ACCESS;
1913
1914 /* send memory write request (command 0x2n, n: access size) */
1915 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1916 return retval;
1917
1918 /* send base address for read request */
1919 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1920 return retval;
1921
1922 /* send number of requested data words to be written*/
1923 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1924 return retval;
1925
1926 /* extract data from host-endian buffer into byte stream */
1927 #if 0
1928 for (i = 0; i < count; i++)
1929 {
1930 switch (size)
1931 {
1932 case 4:
1933 value = target_buffer_get_u32(target, buffer);
1934 xscale_send_u32(target, value);
1935 buffer += 4;
1936 break;
1937 case 2:
1938 value = target_buffer_get_u16(target, buffer);
1939 xscale_send_u32(target, value);
1940 buffer += 2;
1941 break;
1942 case 1:
1943 value = *buffer;
1944 xscale_send_u32(target, value);
1945 buffer += 1;
1946 break;
1947 default:
1948 LOG_ERROR("should never get here");
1949 exit(-1);
1950 }
1951 }
1952 #endif
1953 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1954 return retval;
1955
1956 /* examine DCSR, to see if Sticky Abort (SA) got set */
1957 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1958 return retval;
1959 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1960 {
1961 /* clear SA bit */
1962 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1963 return retval;
1964
1965 return ERROR_TARGET_DATA_ABORT;
1966 }
1967
1968 return ERROR_OK;
1969 }
1970
/* Bulk (word-sized) memory write: thin wrapper that forwards to
 * xscale_write_memory() with a fixed access size of 4 bytes. */
static int xscale_bulk_write_memory(struct target *target, uint32_t address,
	uint32_t count, uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}
1976
1977 static uint32_t xscale_get_ttb(struct target *target)
1978 {
1979 struct xscale_common *xscale = target_to_xscale(target);
1980 uint32_t ttb;
1981
1982 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1983 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1984
1985 return ttb;
1986 }
1987
1988 static void xscale_disable_mmu_caches(struct target *target, int mmu,
1989 int d_u_cache, int i_cache)
1990 {
1991 struct xscale_common *xscale = target_to_xscale(target);
1992 uint32_t cp15_control;
1993
1994 /* read cp15 control register */
1995 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1996 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1997
1998 if (mmu)
1999 cp15_control &= ~0x1U;
2000
2001 if (d_u_cache)
2002 {
2003 /* clean DCache */
2004 xscale_send_u32(target, 0x50);
2005 xscale_send_u32(target, xscale->cache_clean_address);
2006
2007 /* invalidate DCache */
2008 xscale_send_u32(target, 0x51);
2009
2010 cp15_control &= ~0x4U;
2011 }
2012
2013 if (i_cache)
2014 {
2015 /* invalidate ICache */
2016 xscale_send_u32(target, 0x52);
2017 cp15_control &= ~0x1000U;
2018 }
2019
2020 /* write new cp15 control register */
2021 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2022
2023 /* execute cpwait to ensure outstanding operations complete */
2024 xscale_send_u32(target, 0x53);
2025 }
2026
2027 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2028 int d_u_cache, int i_cache)
2029 {
2030 struct xscale_common *xscale = target_to_xscale(target);
2031 uint32_t cp15_control;
2032
2033 /* read cp15 control register */
2034 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2035 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2036
2037 if (mmu)
2038 cp15_control |= 0x1U;
2039
2040 if (d_u_cache)
2041 cp15_control |= 0x4U;
2042
2043 if (i_cache)
2044 cp15_control |= 0x1000U;
2045
2046 /* write new cp15 control register */
2047 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2048
2049 /* execute cpwait to ensure outstanding operations complete */
2050 xscale_send_u32(target, 0x53);
2051 }
2052
2053 static int xscale_set_breakpoint(struct target *target,
2054 struct breakpoint *breakpoint)
2055 {
2056 int retval;
2057 struct xscale_common *xscale = target_to_xscale(target);
2058
2059 if (target->state != TARGET_HALTED)
2060 {
2061 LOG_WARNING("target not halted");
2062 return ERROR_TARGET_NOT_HALTED;
2063 }
2064
2065 if (breakpoint->set)
2066 {
2067 LOG_WARNING("breakpoint already set");
2068 return ERROR_OK;
2069 }
2070
2071 if (breakpoint->type == BKPT_HARD)
2072 {
2073 uint32_t value = breakpoint->address | 1;
2074 if (!xscale->ibcr0_used)
2075 {
2076 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2077 xscale->ibcr0_used = 1;
2078 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2079 }
2080 else if (!xscale->ibcr1_used)
2081 {
2082 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2083 xscale->ibcr1_used = 1;
2084 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2085 }
2086 else
2087 {
2088 LOG_ERROR("BUG: no hardware comparator available");
2089 return ERROR_OK;
2090 }
2091 }
2092 else if (breakpoint->type == BKPT_SOFT)
2093 {
2094 if (breakpoint->length == 4)
2095 {
2096 /* keep the original instruction in target endianness */
2097 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2098 {
2099 return retval;
2100 }
2101 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2102 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2103 {
2104 return retval;
2105 }
2106 }
2107 else
2108 {
2109 /* keep the original instruction in target endianness */
2110 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2111 {
2112 return retval;
2113 }
2114 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2115 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2116 {
2117 return retval;
2118 }
2119 }
2120 breakpoint->set = 1;
2121 }
2122
2123 return ERROR_OK;
2124 }
2125
2126 static int xscale_add_breakpoint(struct target *target,
2127 struct breakpoint *breakpoint)
2128 {
2129 struct xscale_common *xscale = target_to_xscale(target);
2130
2131 if (target->state != TARGET_HALTED)
2132 {
2133 LOG_WARNING("target not halted");
2134 return ERROR_TARGET_NOT_HALTED;
2135 }
2136
2137 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2138 {
2139 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2140 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2141 }
2142
2143 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2144 {
2145 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2146 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2147 }
2148
2149 if (breakpoint->type == BKPT_HARD)
2150 {
2151 xscale->ibcr_available--;
2152 }
2153
2154 return ERROR_OK;
2155 }
2156
2157 static int xscale_unset_breakpoint(struct target *target,
2158 struct breakpoint *breakpoint)
2159 {
2160 int retval;
2161 struct xscale_common *xscale = target_to_xscale(target);
2162
2163 if (target->state != TARGET_HALTED)
2164 {
2165 LOG_WARNING("target not halted");
2166 return ERROR_TARGET_NOT_HALTED;
2167 }
2168
2169 if (!breakpoint->set)
2170 {
2171 LOG_WARNING("breakpoint not set");
2172 return ERROR_OK;
2173 }
2174
2175 if (breakpoint->type == BKPT_HARD)
2176 {
2177 if (breakpoint->set == 1)
2178 {
2179 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2180 xscale->ibcr0_used = 0;
2181 }
2182 else if (breakpoint->set == 2)
2183 {
2184 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2185 xscale->ibcr1_used = 0;
2186 }
2187 breakpoint->set = 0;
2188 }
2189 else
2190 {
2191 /* restore original instruction (kept in target endianness) */
2192 if (breakpoint->length == 4)
2193 {
2194 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2195 {
2196 return retval;
2197 }
2198 }
2199 else
2200 {
2201 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2202 {
2203 return retval;
2204 }
2205 }
2206 breakpoint->set = 0;
2207 }
2208
2209 return ERROR_OK;
2210 }
2211
2212 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2213 {
2214 struct xscale_common *xscale = target_to_xscale(target);
2215
2216 if (target->state != TARGET_HALTED)
2217 {
2218 LOG_WARNING("target not halted");
2219 return ERROR_TARGET_NOT_HALTED;
2220 }
2221
2222 if (breakpoint->set)
2223 {
2224 xscale_unset_breakpoint(target, breakpoint);
2225 }
2226
2227 if (breakpoint->type == BKPT_HARD)
2228 xscale->ibcr_available++;
2229
2230 return ERROR_OK;
2231 }
2232
2233 static int xscale_set_watchpoint(struct target *target,
2234 struct watchpoint *watchpoint)
2235 {
2236 struct xscale_common *xscale = target_to_xscale(target);
2237 uint8_t enable = 0;
2238 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2239 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2240
2241 if (target->state != TARGET_HALTED)
2242 {
2243 LOG_WARNING("target not halted");
2244 return ERROR_TARGET_NOT_HALTED;
2245 }
2246
2247 xscale_get_reg(dbcon);
2248
2249 switch (watchpoint->rw)
2250 {
2251 case WPT_READ:
2252 enable = 0x3;
2253 break;
2254 case WPT_ACCESS:
2255 enable = 0x2;
2256 break;
2257 case WPT_WRITE:
2258 enable = 0x1;
2259 break;
2260 default:
2261 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2262 }
2263
2264 if (!xscale->dbr0_used)
2265 {
2266 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2267 dbcon_value |= enable;
2268 xscale_set_reg_u32(dbcon, dbcon_value);
2269 watchpoint->set = 1;
2270 xscale->dbr0_used = 1;
2271 }
2272 else if (!xscale->dbr1_used)
2273 {
2274 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2275 dbcon_value |= enable << 2;
2276 xscale_set_reg_u32(dbcon, dbcon_value);
2277 watchpoint->set = 2;
2278 xscale->dbr1_used = 1;
2279 }
2280 else
2281 {
2282 LOG_ERROR("BUG: no hardware comparator available");
2283 return ERROR_OK;
2284 }
2285
2286 return ERROR_OK;
2287 }
2288
2289 static int xscale_add_watchpoint(struct target *target,
2290 struct watchpoint *watchpoint)
2291 {
2292 struct xscale_common *xscale = target_to_xscale(target);
2293
2294 if (target->state != TARGET_HALTED)
2295 {
2296 LOG_WARNING("target not halted");
2297 return ERROR_TARGET_NOT_HALTED;
2298 }
2299
2300 if (xscale->dbr_available < 1)
2301 {
2302 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2303 }
2304
2305 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2306 {
2307 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2308 }
2309
2310 xscale->dbr_available--;
2311
2312 return ERROR_OK;
2313 }
2314
2315 static int xscale_unset_watchpoint(struct target *target,
2316 struct watchpoint *watchpoint)
2317 {
2318 struct xscale_common *xscale = target_to_xscale(target);
2319 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2320 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2321
2322 if (target->state != TARGET_HALTED)
2323 {
2324 LOG_WARNING("target not halted");
2325 return ERROR_TARGET_NOT_HALTED;
2326 }
2327
2328 if (!watchpoint->set)
2329 {
2330 LOG_WARNING("breakpoint not set");
2331 return ERROR_OK;
2332 }
2333
2334 if (watchpoint->set == 1)
2335 {
2336 dbcon_value &= ~0x3;
2337 xscale_set_reg_u32(dbcon, dbcon_value);
2338 xscale->dbr0_used = 0;
2339 }
2340 else if (watchpoint->set == 2)
2341 {
2342 dbcon_value &= ~0xc;
2343 xscale_set_reg_u32(dbcon, dbcon_value);
2344 xscale->dbr1_used = 0;
2345 }
2346 watchpoint->set = 0;
2347
2348 return ERROR_OK;
2349 }
2350
2351 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2352 {
2353 struct xscale_common *xscale = target_to_xscale(target);
2354
2355 if (target->state != TARGET_HALTED)
2356 {
2357 LOG_WARNING("target not halted");
2358 return ERROR_TARGET_NOT_HALTED;
2359 }
2360
2361 if (watchpoint->set)
2362 {
2363 xscale_unset_watchpoint(target, watchpoint);
2364 }
2365
2366 xscale->dbr_available++;
2367
2368 return ERROR_OK;
2369 }
2370
2371 static int xscale_get_reg(struct reg *reg)
2372 {
2373 struct xscale_reg *arch_info = reg->arch_info;
2374 struct target *target = arch_info->target;
2375 struct xscale_common *xscale = target_to_xscale(target);
2376
2377 /* DCSR, TX and RX are accessible via JTAG */
2378 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2379 {
2380 return xscale_read_dcsr(arch_info->target);
2381 }
2382 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2383 {
2384 /* 1 = consume register content */
2385 return xscale_read_tx(arch_info->target, 1);
2386 }
2387 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2388 {
2389 /* can't read from RX register (host -> debug handler) */
2390 return ERROR_OK;
2391 }
2392 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2393 {
2394 /* can't (explicitly) read from TXRXCTRL register */
2395 return ERROR_OK;
2396 }
2397 else /* Other DBG registers have to be transfered by the debug handler */
2398 {
2399 /* send CP read request (command 0x40) */
2400 xscale_send_u32(target, 0x40);
2401
2402 /* send CP register number */
2403 xscale_send_u32(target, arch_info->dbg_handler_number);
2404
2405 /* read register value */
2406 xscale_read_tx(target, 1);
2407 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2408
2409 reg->dirty = 0;
2410 reg->valid = 1;
2411 }
2412
2413 return ERROR_OK;
2414 }
2415
2416 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2417 {
2418 struct xscale_reg *arch_info = reg->arch_info;
2419 struct target *target = arch_info->target;
2420 struct xscale_common *xscale = target_to_xscale(target);
2421 uint32_t value = buf_get_u32(buf, 0, 32);
2422
2423 /* DCSR, TX and RX are accessible via JTAG */
2424 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2425 {
2426 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2427 return xscale_write_dcsr(arch_info->target, -1, -1);
2428 }
2429 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2430 {
2431 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2432 return xscale_write_rx(arch_info->target);
2433 }
2434 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2435 {
2436 /* can't write to TX register (debug-handler -> host) */
2437 return ERROR_OK;
2438 }
2439 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2440 {
2441 /* can't (explicitly) write to TXRXCTRL register */
2442 return ERROR_OK;
2443 }
2444 else /* Other DBG registers have to be transfered by the debug handler */
2445 {
2446 /* send CP write request (command 0x41) */
2447 xscale_send_u32(target, 0x41);
2448
2449 /* send CP register number */
2450 xscale_send_u32(target, arch_info->dbg_handler_number);
2451
2452 /* send CP register value */
2453 xscale_send_u32(target, value);
2454 buf_set_u32(reg->value, 0, 32, value);
2455 }
2456
2457 return ERROR_OK;
2458 }
2459
2460 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2461 {
2462 struct xscale_common *xscale = target_to_xscale(target);
2463 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2464 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2465
2466 /* send CP write request (command 0x41) */
2467 xscale_send_u32(target, 0x41);
2468
2469 /* send CP register number */
2470 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2471
2472 /* send CP register value */
2473 xscale_send_u32(target, value);
2474 buf_set_u32(dcsr->value, 0, 32, value);
2475
2476 return ERROR_OK;
2477 }
2478
2479 static int xscale_read_trace(struct target *target)
2480 {
2481 struct xscale_common *xscale = target_to_xscale(target);
2482 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2483 struct xscale_trace_data **trace_data_p;
2484
2485 /* 258 words from debug handler
2486 * 256 trace buffer entries
2487 * 2 checkpoint addresses
2488 */
2489 uint32_t trace_buffer[258];
2490 int is_address[256];
2491 int i, j;
2492
2493 if (target->state != TARGET_HALTED)
2494 {
2495 LOG_WARNING("target must be stopped to read trace data");
2496 return ERROR_TARGET_NOT_HALTED;
2497 }
2498
2499 /* send read trace buffer command (command 0x61) */
2500 xscale_send_u32(target, 0x61);
2501
2502 /* receive trace buffer content */
2503 xscale_receive(target, trace_buffer, 258);
2504
2505 /* parse buffer backwards to identify address entries */
2506 for (i = 255; i >= 0; i--)
2507 {
2508 is_address[i] = 0;
2509 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2510 ((trace_buffer[i] & 0xf0) == 0xd0))
2511 {
2512 if (i >= 3)
2513 is_address[--i] = 1;
2514 if (i >= 2)
2515 is_address[--i] = 1;
2516 if (i >= 1)
2517 is_address[--i] = 1;
2518 if (i >= 0)
2519 is_address[--i] = 1;
2520 }
2521 }
2522
2523
2524 /* search first non-zero entry */
2525 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2526 ;
2527
2528 if (j == 256)
2529 {
2530 LOG_DEBUG("no trace data collected");
2531 return ERROR_XSCALE_NO_TRACE_DATA;
2532 }
2533
2534 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2535 ;
2536
2537 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2538 (*trace_data_p)->next = NULL;
2539 (*trace_data_p)->chkpt0 = trace_buffer[256];
2540 (*trace_data_p)->chkpt1 = trace_buffer[257];
2541 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2542 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2543 (*trace_data_p)->depth = 256 - j;
2544
2545 for (i = j; i < 256; i++)
2546 {
2547 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2548 if (is_address[i])
2549 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2550 else
2551 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2552 }
2553
2554 return ERROR_OK;
2555 }
2556
2557 static int xscale_read_instruction(struct target *target,
2558 struct arm_instruction *instruction)
2559 {
2560 struct xscale_common *xscale = target_to_xscale(target);
2561 int i;
2562 int section = -1;
2563 size_t size_read;
2564 uint32_t opcode;
2565 int retval;
2566
2567 if (!xscale->trace.image)
2568 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2569
2570 /* search for the section the current instruction belongs to */
2571 for (i = 0; i < xscale->trace.image->num_sections; i++)
2572 {
2573 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2574 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2575 {
2576 section = i;
2577 break;
2578 }
2579 }
2580
2581 if (section == -1)
2582 {
2583 /* current instruction couldn't be found in the image */
2584 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2585 }
2586
2587 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2588 {
2589 uint8_t buf[4];
2590 if ((retval = image_read_section(xscale->trace.image, section,
2591 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2592 4, buf, &size_read)) != ERROR_OK)
2593 {
2594 LOG_ERROR("error while reading instruction: %i", retval);
2595 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2596 }
2597 opcode = target_buffer_get_u32(target, buf);
2598 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2599 }
2600 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2601 {
2602 uint8_t buf[2];
2603 if ((retval = image_read_section(xscale->trace.image, section,
2604 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2605 2, buf, &size_read)) != ERROR_OK)
2606 {
2607 LOG_ERROR("error while reading instruction: %i", retval);
2608 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2609 }
2610 opcode = target_buffer_get_u16(target, buf);
2611 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2612 }
2613 else
2614 {
2615 LOG_ERROR("BUG: unknown core state encountered");
2616 exit(-1);
2617 }
2618
2619 return ERROR_OK;
2620 }
2621
2622 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2623 int i, uint32_t *target)
2624 {
2625 /* if there are less than four entries prior to the indirect branch message
2626 * we can't extract the address */
2627 if (i < 4)
2628 {
2629 return -1;
2630 }
2631
2632 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2633 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2634
2635 return 0;
2636 }
2637
2638 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2639 {
2640 struct xscale_common *xscale = target_to_xscale(target);
2641 int next_pc_ok = 0;
2642 uint32_t next_pc = 0x0;
2643 struct xscale_trace_data *trace_data = xscale->trace.data;
2644 int retval;
2645
2646 while (trace_data)
2647 {
2648 int i, chkpt;
2649 int rollover;
2650 int branch;
2651 int exception;
2652 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2653
2654 chkpt = 0;
2655 rollover = 0;
2656
2657 for (i = 0; i < trace_data->depth; i++)
2658 {
2659 next_pc_ok = 0;
2660 branch = 0;
2661 exception = 0;
2662
2663 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2664 continue;
2665
2666 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2667 {
2668 case 0: /* Exceptions */
2669 case 1:
2670 case 2:
2671 case 3:
2672 case 4:
2673 case 5:
2674 case 6:
2675 case 7:
2676 exception = (trace_data->entries[i].data & 0x70) >> 4;
2677 next_pc_ok = 1;
2678 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2679 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2680 break;
2681 case 8: /* Direct Branch */
2682 branch = 1;
2683 break;
2684 case 9: /* Indirect Branch */
2685 branch = 1;
2686 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2687 {
2688 next_pc_ok = 1;
2689 }
2690 break;
2691 case 13: /* Checkpointed Indirect Branch */
2692 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2693 {
2694 next_pc_ok = 1;
2695 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2696 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2697 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2698 }
2699 /* explicit fall-through */
2700 case 12: /* Checkpointed Direct Branch */
2701 branch = 1;
2702 if (chkpt == 0)
2703 {
2704 next_pc_ok = 1;
2705 next_pc = trace_data->chkpt0;
2706 chkpt++;
2707 }
2708 else if (chkpt == 1)
2709 {
2710 next_pc_ok = 1;
2711 next_pc = trace_data->chkpt0;
2712 chkpt++;
2713 }
2714 else
2715 {
2716 LOG_WARNING("more than two checkpointed branches encountered");
2717 }
2718 break;
2719 case 15: /* Roll-over */
2720 rollover++;
2721 continue;
2722 default: /* Reserved */
2723 command_print(cmd_ctx, "--- reserved trace message ---");
2724 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2725 return ERROR_OK;
2726 }
2727
2728 if (xscale->trace.pc_ok)
2729 {
2730 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2731 struct arm_instruction instruction;
2732
2733 if ((exception == 6) || (exception == 7))
2734 {
2735 /* IRQ or FIQ exception, no instruction executed */
2736 executed -= 1;
2737 }
2738
2739 while (executed-- >= 0)
2740 {
2741 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2742 {
2743 /* can't continue tracing with no image available */
2744 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2745 {
2746 return retval;
2747 }
2748 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2749 {
2750 /* TODO: handle incomplete images */
2751 }
2752 }
2753
2754 /* a precise abort on a load to the PC is included in the incremental
2755 * word count, other instructions causing data aborts are not included
2756 */
2757 if ((executed == 0) && (exception == 4)
2758 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2759 {
2760 if ((instruction.type == ARM_LDM)
2761 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2762 {
2763 executed--;
2764 }
2765 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2766 && (instruction.info.load_store.Rd != 15))
2767 {
2768 executed--;
2769 }
2770 }
2771
2772 /* only the last instruction executed
2773 * (the one that caused the control flow change)
2774 * could be a taken branch
2775 */
2776 if (((executed == -1) && (branch == 1)) &&
2777 (((instruction.type == ARM_B) ||
2778 (instruction.type == ARM_BL) ||
2779 (instruction.type == ARM_BLX)) &&
2780 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2781 {
2782 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2783 }
2784 else
2785 {
2786 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2787 }
2788 command_print(cmd_ctx, "%s", instruction.text);
2789 }
2790
2791 rollover = 0;
2792 }
2793
2794 if (next_pc_ok)
2795 {
2796 xscale->trace.current_pc = next_pc;
2797 xscale->trace.pc_ok = 1;
2798 }
2799 }
2800
2801 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2802 {
2803 struct arm_instruction instruction;
2804 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2805 {
2806 /* can't continue tracing with no image available */
2807 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2808 {
2809 return retval;
2810 }
2811 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2812 {
2813 /* TODO: handle incomplete images */
2814 }
2815 }
2816 command_print(cmd_ctx, "%s", instruction.text);
2817 }
2818
2819 trace_data = trace_data->next;
2820 }
2821
2822 return ERROR_OK;
2823 }
2824
/* hooks connecting XScale-specific register accessors into the
 * generic register-cache layer */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2829
2830 static void xscale_build_reg_cache(struct target *target)
2831 {
2832 struct xscale_common *xscale = target_to_xscale(target);
2833 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2834 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2835 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2836 int i;
2837 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(struct xscale_reg);
2838
2839 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2840
2841 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2842 cache_p = &(*cache_p)->next;
2843
2844 /* fill in values for the xscale reg cache */
2845 (*cache_p)->name = "XScale registers";
2846 (*cache_p)->next = NULL;
2847 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2848 (*cache_p)->num_regs = num_regs;
2849
2850 for (i = 0; i < num_regs; i++)
2851 {
2852 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2853 (*cache_p)->reg_list[i].value = calloc(4, 1);
2854 (*cache_p)->reg_list[i].dirty = 0;
2855 (*cache_p)->reg_list[i].valid = 0;
2856 (*cache_p)->reg_list[i].size = 32;
2857 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2858 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2859 arch_info[i] = xscale_reg_arch_info[i];
2860 arch_info[i].target = target;
2861 }
2862
2863 xscale->reg_cache = (*cache_p);
2864 }
2865
/* target_type "init_target" hook: create the register caches once the
 * target and its TAP are fully configured */
static int xscale_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
2872
/* Initialize the xscale_common structure with its defaults and wire
 * up the generic ARMv4/5 and MMU layers to the XScale back end.
 * Called once per target from xscale_target_create().
 *
 * NOTE(review): ordering matters here — armv4_5_init_arch_info() is
 * called only after the armv4_5 callbacks have been assigned. */
static int xscale_init_arch_info(struct target *target,
		struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	struct arm *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specfic data (none so far) */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* variant info ... */
	if (variant) {
		int ir_length = 0;

		/* known XScale cores come with either a 5-bit or a 7-bit
		 * JTAG instruction register; correct a wrong config value */
		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* all other vectors default to a branch-to-self ("b .") */
	for (i = 1; i <= 7; i++) {
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two instruction comparators and two data comparators, all free */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* software breakpoint opcodes (ARM and Thumb encodings) */
	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	xscale->trace.capture_status = TRACE_IDLE;
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.buffer_enabled = 0;
	xscale->trace.buffer_fill = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	armv4_5_init_arch_info(target, armv4_5);

	/* hook the generic ARMv4/5 MMU/cache layer to the XScale back end */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
2977
2978 static int xscale_target_create(struct target *target, Jim_Interp *interp)
2979 {
2980 struct xscale_common *xscale;
2981
2982 if (sizeof xscale_debug_handler - 1 > 0x800) {
2983 LOG_ERROR("debug_handler.bin: larger than 2kb");
2984 return ERROR_FAIL;
2985 }
2986
2987 xscale = calloc(1, sizeof(*xscale));
2988 if (!xscale)
2989 return ERROR_FAIL;
2990
2991 return xscale_init_arch_info(target, xscale, target->tap,
2992 target->variant);
2993 }
2994
2995 COMMAND_HANDLER(xscale_handle_debug_handler_command)
2996 {
2997 struct target *target = NULL;
2998 struct xscale_common *xscale;
2999 int retval;
3000 uint32_t handler_address;
3001
3002 if (CMD_ARGC < 2)
3003 {
3004 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3005 return ERROR_OK;
3006 }
3007
3008 if ((target = get_target(CMD_ARGV[0])) == NULL)
3009 {
3010 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3011 return ERROR_FAIL;
3012 }
3013
3014 xscale = target_to_xscale(target);
3015 retval = xscale_verify_pointer(CMD_CTX, xscale);
3016 if (retval != ERROR_OK)
3017 return retval;
3018
3019 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3020
3021 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3022 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3023 {
3024 xscale->handler_address = handler_address;
3025 }
3026 else
3027 {
3028 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3029 return ERROR_FAIL;
3030 }
3031
3032 return ERROR_OK;
3033 }
3034
3035 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3036 {
3037 struct target *target = NULL;
3038 struct xscale_common *xscale;
3039 int retval;
3040 uint32_t cache_clean_address;
3041
3042 if (CMD_ARGC < 2)
3043 {
3044 return ERROR_COMMAND_SYNTAX_ERROR;
3045 }
3046
3047 target = get_target(CMD_ARGV[0]);
3048 if (target == NULL)
3049 {
3050 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3051 return ERROR_FAIL;
3052 }
3053 xscale = target_to_xscale(target);
3054 retval = xscale_verify_pointer(CMD_CTX, xscale);
3055 if (retval != ERROR_OK)
3056 return retval;
3057
3058 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3059
3060 if (cache_clean_address & 0xffff)
3061 {
3062 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3063 }
3064 else
3065 {
3066 xscale->cache_clean_address = cache_clean_address;
3067 }
3068
3069 return ERROR_OK;
3070 }
3071
3072 COMMAND_HANDLER(xscale_handle_cache_info_command)
3073 {
3074 struct target *target = get_current_target(CMD_CTX);
3075 struct xscale_common *xscale = target_to_xscale(target);
3076 int retval;
3077
3078 retval = xscale_verify_pointer(CMD_CTX, xscale);
3079 if (retval != ERROR_OK)
3080 return retval;
3081
3082 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3083 }
3084
3085 static int xscale_virt2phys(struct target *target,
3086 uint32_t virtual, uint32_t *physical)
3087 {
3088 struct xscale_common *xscale = target_to_xscale(target);
3089 int type;
3090 uint32_t cb;
3091 int domain;
3092 uint32_t ap;
3093
3094 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3095 LOG_ERROR(xscale_not);
3096 return ERROR_TARGET_INVALID;
3097 }
3098
3099 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3100 if (type == -1)
3101 {
3102 return ret;
3103 }
3104 *physical = ret;
3105 return ERROR_OK;
3106 }
3107
3108 static int xscale_mmu(struct target *target, int *enabled)
3109 {
3110 struct xscale_common *xscale = target_to_xscale(target);
3111
3112 if (target->state != TARGET_HALTED)
3113 {
3114 LOG_ERROR("Target not halted");
3115 return ERROR_TARGET_INVALID;
3116 }
3117 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3118 return ERROR_OK;
3119 }
3120
3121 COMMAND_HANDLER(xscale_handle_mmu_command)
3122 {
3123 struct target *target = get_current_target(CMD_CTX);
3124 struct xscale_common *xscale = target_to_xscale(target);
3125 int retval;
3126
3127 retval = xscale_verify_pointer(CMD_CTX, xscale);
3128 if (retval != ERROR_OK)
3129 return retval;
3130
3131 if (target->state != TARGET_HALTED)
3132 {
3133 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3134 return ERROR_OK;
3135 }
3136
3137 if (CMD_ARGC >= 1)
3138 {
3139 bool enable;
3140 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3141 if (enable)
3142 xscale_enable_mmu_caches(target, 1, 0, 0);
3143 else
3144 xscale_disable_mmu_caches(target, 1, 0, 0);
3145 xscale->armv4_5_mmu.mmu_enabled = enable;
3146 }
3147
3148 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3149
3150 return ERROR_OK;
3151 }
3152
3153 COMMAND_HANDLER(xscale_handle_idcache_command)
3154 {
3155 struct target *target = get_current_target(CMD_CTX);
3156 struct xscale_common *xscale = target_to_xscale(target);
3157
3158 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3159 if (retval != ERROR_OK)
3160 return retval;
3161
3162 if (target->state != TARGET_HALTED)
3163 {
3164 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3165 return ERROR_OK;
3166 }
3167
3168 bool icache;
3169 COMMAND_PARSE_BOOL(CMD_NAME, icache, "icache", "dcache");
3170
3171 if (CMD_ARGC >= 1)
3172 {
3173 bool enable;
3174 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3175 if (enable)
3176 xscale_enable_mmu_caches(target, 1, 0, 0);
3177 else
3178 xscale_disable_mmu_caches(target, 1, 0, 0);
3179 if (icache)
3180 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3181 else
3182 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3183 }
3184
3185 bool enabled = icache ?
3186 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3187 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3188 const char *msg = enabled ? "enabled" : "disabled";
3189 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3190
3191 return ERROR_OK;
3192 }
3193
3194 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3195 {
3196 struct target *target = get_current_target(CMD_CTX);
3197 struct xscale_common *xscale = target_to_xscale(target);
3198 int retval;
3199
3200 retval = xscale_verify_pointer(CMD_CTX, xscale);
3201 if (retval != ERROR_OK)
3202 return retval;
3203
3204 if (CMD_ARGC < 1)
3205 {
3206 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3207 }
3208 else
3209 {
3210 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3211 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3212 xscale_write_dcsr(target, -1, -1);
3213 }
3214
3215 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3216
3217 return ERROR_OK;
3218 }
3219
3220
3221 COMMAND_HANDLER(xscale_handle_vector_table_command)
3222 {
3223 struct target *target = get_current_target(CMD_CTX);
3224 struct xscale_common *xscale = target_to_xscale(target);
3225 int err = 0;
3226 int retval;
3227
3228 retval = xscale_verify_pointer(CMD_CTX, xscale);
3229 if (retval != ERROR_OK)
3230 return retval;
3231
3232 if (CMD_ARGC == 0) /* print current settings */
3233 {
3234 int idx;
3235
3236 command_print(CMD_CTX, "active user-set static vectors:");
3237 for (idx = 1; idx < 8; idx++)
3238 if (xscale->static_low_vectors_set & (1 << idx))
3239 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3240 for (idx = 1; idx < 8; idx++)
3241 if (xscale->static_high_vectors_set & (1 << idx))
3242 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3243 return ERROR_OK;
3244 }
3245
3246 if (CMD_ARGC != 3)
3247 err = 1;
3248 else
3249 {
3250 int idx;
3251 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3252 uint32_t vec;
3253 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3254
3255 if (idx < 1 || idx >= 8)
3256 err = 1;
3257
3258 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3259 {
3260 xscale->static_low_vectors_set |= (1<<idx);
3261 xscale->static_low_vectors[idx] = vec;
3262 }
3263 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3264 {
3265 xscale->static_high_vectors_set |= (1<<idx);
3266 xscale->static_high_vectors[idx] = vec;
3267 }
3268 else
3269 err = 1;
3270 }
3271
3272 if (err)
3273 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3274
3275 return ERROR_OK;
3276 }
3277
3278
3279 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3280 {
3281 struct target *target = get_current_target(CMD_CTX);
3282 struct xscale_common *xscale = target_to_xscale(target);
3283 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
3284 uint32_t dcsr_value;
3285 int retval;
3286
3287 retval = xscale_verify_pointer(CMD_CTX, xscale);
3288 if (retval != ERROR_OK)
3289 return retval;
3290
3291 if (target->state != TARGET_HALTED)
3292 {
3293 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3294 return ERROR_OK;
3295 }
3296
3297 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3298 {
3299 struct xscale_trace_data *td, *next_td;
3300 xscale->trace.buffer_enabled = 1;
3301
3302 /* free old trace data */
3303 td = xscale->trace.data;
3304 while (td)
3305 {
3306 next_td = td->next;
3307
3308 if (td->entries)
3309 free(td->entries);
3310 free(td);
3311 td = next_td;
3312 }
3313 xscale->trace.data = NULL;
3314 }
3315 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3316 {
3317 xscale->trace.buffer_enabled = 0;
3318 }
3319
3320 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3321 {
3322 uint32_t fill = 1;
3323 if (CMD_ARGC >= 3)
3324 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3325 xscale->trace.buffer_fill = fill;
3326 }
3327 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3328 {
3329 xscale->trace.buffer_fill = -1;
3330 }
3331
3332 if (xscale->trace.buffer_enabled)
3333 {
3334 /* if we enable the trace buffer in fill-once
3335 * mode we know the address of the first instruction */
3336 xscale->trace.pc_ok = 1;
3337 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3338 }
3339 else
3340 {
3341 /* otherwise the address is unknown, and we have no known good PC */
3342 xscale->trace.pc_ok = 0;
3343 }
3344
3345 command_print(CMD_CTX, "trace buffer %s (%s)",
3346 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3347 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3348
3349 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3350 if (xscale->trace.buffer_fill >= 0)
3351 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3352 else
3353 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3354
3355 return ERROR_OK;
3356 }
3357
3358 COMMAND_HANDLER(xscale_handle_trace_image_command)
3359 {
3360 struct target *target = get_current_target(CMD_CTX);
3361 struct xscale_common *xscale = target_to_xscale(target);
3362 int retval;
3363
3364 if (CMD_ARGC < 1)
3365 {
3366 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3367 return ERROR_OK;
3368 }
3369
3370 retval = xscale_verify_pointer(CMD_CTX, xscale);
3371 if (retval != ERROR_OK)
3372 return retval;
3373
3374 if (xscale->trace.image)
3375 {
3376 image_close(xscale->trace.image);
3377 free(xscale->trace.image);
3378 command_print(CMD_CTX, "previously loaded image found and closed");
3379 }
3380
3381 xscale->trace.image = malloc(sizeof(struct image));
3382 xscale->trace.image->base_address_set = 0;
3383 xscale->trace.image->start_address_set = 0;
3384
3385 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3386 if (CMD_ARGC >= 2)
3387 {
3388 xscale->trace.image->base_address_set = 1;
3389 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3390 }
3391 else
3392 {
3393 xscale->trace.image->base_address_set = 0;
3394 }
3395
3396 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3397 {
3398 free(xscale->trace.image);
3399 xscale->trace.image = NULL;
3400 return ERROR_OK;
3401 }
3402
3403 return ERROR_OK;
3404 }
3405
3406 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3407 {
3408 struct target *target = get_current_target(CMD_CTX);
3409 struct xscale_common *xscale = target_to_xscale(target);
3410 struct xscale_trace_data *trace_data;
3411 struct fileio file;
3412 int retval;
3413
3414 retval = xscale_verify_pointer(CMD_CTX, xscale);
3415 if (retval != ERROR_OK)
3416 return retval;
3417
3418 if (target->state != TARGET_HALTED)
3419 {
3420 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3421 return ERROR_OK;
3422 }
3423
3424 if (CMD_ARGC < 1)
3425 {
3426 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3427 return ERROR_OK;
3428 }
3429
3430 trace_data = xscale->trace.data;
3431
3432 if (!trace_data)
3433 {
3434 command_print(CMD_CTX, "no trace data collected");
3435 return ERROR_OK;
3436 }
3437
3438 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3439 {
3440 return ERROR_OK;
3441 }
3442
3443 while (trace_data)
3444 {
3445 int i;
3446
3447 fileio_write_u32(&file, trace_data->chkpt0);
3448 fileio_write_u32(&file, trace_data->chkpt1);
3449 fileio_write_u32(&file, trace_data->last_instruction);
3450 fileio_write_u32(&file, trace_data->depth);
3451
3452 for (i = 0; i < trace_data->depth; i++)
3453 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3454
3455 trace_data = trace_data->next;
3456 }
3457
3458 fileio_close(&file);
3459
3460 return ERROR_OK;
3461 }
3462
3463 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3464 {
3465 struct target *target = get_current_target(CMD_CTX);
3466 struct xscale_common *xscale = target_to_xscale(target);
3467 int retval;
3468
3469 retval = xscale_verify_pointer(CMD_CTX, xscale);
3470 if (retval != ERROR_OK)
3471 return retval;
3472
3473 xscale_analyze_trace(target, CMD_CTX);
3474
3475 return ERROR_OK;
3476 }
3477
3478 COMMAND_HANDLER(xscale_handle_cp15)
3479 {
3480 struct target *target = get_current_target(CMD_CTX);
3481 struct xscale_common *xscale = target_to_xscale(target);
3482 int retval;
3483
3484 retval = xscale_verify_pointer(CMD_CTX, xscale);
3485 if (retval != ERROR_OK)
3486 return retval;
3487
3488 if (target->state != TARGET_HALTED)
3489 {
3490 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3491 return ERROR_OK;
3492 }
3493 uint32_t reg_no = 0;
3494 struct reg *reg = NULL;
3495 if (CMD_ARGC > 0)
3496 {
3497 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3498 /*translate from xscale cp15 register no to openocd register*/
3499 switch (reg_no)
3500 {
3501 case 0:
3502 reg_no = XSCALE_MAINID;
3503 break;
3504 case 1:
3505 reg_no = XSCALE_CTRL;
3506 break;
3507 case 2:
3508 reg_no = XSCALE_TTB;
3509 break;
3510 case 3:
3511 reg_no = XSCALE_DAC;
3512 break;
3513 case 5:
3514 reg_no = XSCALE_FSR;
3515 break;
3516 case 6:
3517 reg_no = XSCALE_FAR;
3518 break;
3519 case 13:
3520 reg_no = XSCALE_PID;
3521 break;
3522 case 15:
3523 reg_no = XSCALE_CPACCESS;
3524 break;
3525 default:
3526 command_print(CMD_CTX, "invalid register number");
3527 return ERROR_INVALID_ARGUMENTS;
3528 }
3529 reg = &xscale->reg_cache->reg_list[reg_no];
3530
3531 }
3532 if (CMD_ARGC == 1)
3533 {
3534 uint32_t value;
3535
3536 /* read cp15 control register */
3537 xscale_get_reg(reg);
3538 value = buf_get_u32(reg->value, 0, 32);
3539 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3540 }
3541 else if (CMD_ARGC == 2)
3542 {
3543 uint32_t value;
3544 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3545
3546 /* send CP write request (command 0x41) */
3547 xscale_send_u32(target, 0x41);
3548
3549 /* send CP register number */
3550 xscale_send_u32(target, reg_no);
3551
3552 /* send CP register value */
3553 xscale_send_u32(target, value);
3554
3555 /* execute cpwait to ensure outstanding operations complete */
3556 xscale_send_u32(target, 0x53);
3557 }
3558 else
3559 {
3560 command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
3561 }
3562
3563 return ERROR_OK;
3564 }
3565
/* Register all "xscale" subcommands plus the generic ARMv4/v5 commands.
 * Called through xscale_target.register_commands at startup. */
static int xscale_register_commands(struct command_context *cmd_ctx)
{
	struct command *xscale_cmd;

	/* top-level "xscale" command; the subcommands below hang off it */
	xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");

	/* configuration commands, usable before the target is examined */
	register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
	register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);

	/* cache/MMU control; icache and dcache share one handler which
	 * dispatches on the command name */
	register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
	register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
	register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
	register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");

	/* exception vector handling */
	register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
	register_command(cmd_ctx, xscale_cmd, "vector_table", xscale_handle_vector_table_command, COMMAND_EXEC, "<high|low> <index> <code> set static code for exception handler entry");

	/* trace-buffer control and analysis */
	register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable | disable> ['fill' [n]|'wrap']");

	register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
	register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
	register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
		COMMAND_EXEC, "load image from <file> [base address]");

	/* raw coprocessor 15 access */
	register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");

	/* also pull in the generic ARMv4/v5 command set (reg, disassemble, ...) */
	armv4_5_register_commands(cmd_ctx);

	return ERROR_OK;
}
3596
/* Target-type vtable binding the XScale implementation into the
 * generic OpenOCD target framework. */
struct target_type xscale_target =
{
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	/* no target-initiated request channel */
	.target_request_data = NULL,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	/* soft reset-halt not supported on XScale */
	.soft_reset_halt = NULL,

	/* GDB register layout is the generic ARMv4/v5 one */
	.get_gdb_reg_list = armv4_5_get_gdb_reg_list,

	.read_memory = xscale_read_memory,
	.write_memory = xscale_write_memory,
	.bulk_write_memory = xscale_bulk_write_memory,

	/* checksum/blank-check run as algorithms on the target */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.register_commands = xscale_register_commands,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	/* MMU-aware address translation hooks */
	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method, you must first sign in with your existing account, and then change the URL to https://review.openocd.org/login/?link to reach this page again; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)