command_handler: change 'args' to CMD_ARGV
[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include "time_support.h"
37 #include "register.h"
38 #include "image.h"
39
40
41 /*
42 * Important XScale documents available as of October 2009 include:
43 *
44 * Intel XScale® Core Developer’s Manual, January 2004
45 * Order Number: 273473-002
46 * This has a chapter detailing debug facilities, and punts some
47 * details to chip-specific microarchitecture documents.
48 *
49 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
50 * Document Number: 273539-005
51 * Less detailed than the developer's manual, but summarizes those
52 * missing details (for most XScales) and gives LOTS of notes about
53 * debugger/handler interaction issues. Presents a simpler reset
54 * and load-handler sequence than the arch doc. (Note, OpenOCD
55 * doesn't currently support "Hot-Debug" as defined there.)
56 *
57 * Chip-specific microarchitecture documents may also be useful.
58 */
59
60
61 /* forward declarations */
62 static int xscale_resume(struct target *, int current,
63 uint32_t address, int handle_breakpoints, int debug_execution);
64 static int xscale_debug_entry(struct target *);
65 static int xscale_restore_context(struct target *);
66 static int xscale_get_reg(struct reg *reg);
67 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
68 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
69 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
70 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_read_trace(struct target *);
72
73
74 /* This XScale "debug handler" is loaded into the processor's
75 * mini-ICache, which is 2K of code writable only via JTAG.
76 *
77 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
78 * binary files cleanly. It's string oriented, and terminates them
79 * with a NUL character. Better would be to generate the constants
80 * and let other code decide names, scoping, and other housekeeping.
81 */
82 static /* unsigned const char xscale_debug_handler[] = ... */
83 #include "xscale_debug.h"
84
/* Names for the XScale debug-model registers exposed through the
 * register cache; order must match xscale_reg_arch_info[] below and
 * the XSCALE_* register indices (0..21 -- see the decade markers).
 */
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID",		/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",		/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",		/* 20 */
	"XSCALE_TXRXCTRL",
};
110
/* Backing info for each cached register, parallel to xscale_reg_list[].
 * The first field is the register number the debug handler uses
 * (presumably the XSCALE_* constant doubles as that number -- defined
 * in xscale.h; confirm there), or -1 for registers that are reachable
 * only through dedicated JTAG scan chains and never via the handler.
 */
static const struct xscale_reg xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
	{-1, NULL}, /* TX accessed via JTAG */
	{-1, NULL}, /* RX accessed via JTAG */
	{-1, NULL}, /* TXRXCTRL implicit access via JTAG */
};
136
137 /* convenience wrapper to access XScale specific registers */
/* Convenience wrapper for xscale_set_reg(): pack a host 32-bit value
 * into a little-endian scratch buffer and write it to @a reg.
 */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t packed[4];

	buf_set_u32(packed, 0, 32, value);
	return xscale_set_reg(reg, packed);
}
146
147 static const char xscale_not[] = "target is not an XScale";
148
149 static int xscale_verify_pointer(struct command_context *cmd_ctx,
150 struct xscale_common *xscale)
151 {
152 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
153 command_print(cmd_ctx, xscale_not);
154 return ERROR_TARGET_INVALID;
155 }
156 return ERROR_OK;
157 }
158
/* Queue an IR scan selecting @a new_instr on @a tap, skipping the scan
 * when the instruction register already holds that opcode.
 *
 * The scan is only queued; callers must eventually run
 * jtag_execute_queue().  Returns ERROR_FAIL for a NULL tap, else
 * ERROR_OK.
 */
static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
{
	if (tap == NULL)
		return ERROR_FAIL;

	if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
	{
		struct scan_field field;
		uint8_t scratch[4];

		memset(&field, 0, sizeof field);
		field.tap = tap;
		field.num_bits = tap->ir_length;
		/* NOTE(review): scratch is a stack buffer that dies before the
		 * queue runs -- assumes jtag_add_ir_scan copies out_value;
		 * confirm against the JTAG queue implementation */
		field.out_value = scratch;
		buf_set_u32(field.out_value, 0, field.num_bits, new_instr);

		jtag_add_ir_scan(1, &field, jtag_get_end_state());
	}

	return ERROR_OK;
}
180
/* Read the Debug Control and Status Register over the SELDCSR scan
 * chain into the register cache, then write it straight back together
 * with the current hold_rst / external_debug_break settings.
 *
 * The chain is 3 control bits + 32 DCSR bits + 1 trailing bit; the
 * check value/mask pairs validate status bits the TAP captures into
 * the control field.  Returns ERROR_OK or a JTAG error code.
 */
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_DRPAUSE);
	xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);

	/* hold_rst and external_debug_break ride in bits 1 and 2 of the
	 * 3-bit control field of the same scan */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	/* cache now mirrors the hardware DCSR */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_set_end_state(TAP_IDLE);

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
246
247
/* JTAG queue callback: convert one captured 32-bit word (stored
 * LSB-first by the scan) into a host-order uint32_t, in place.
 * NOTE(review): the uint32_t store through a cast assumes @a arg
 * points into a real uint32_t array -- true for the only caller,
 * xscale_receive(), so alignment and aliasing are fine there.
 */
static void xscale_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;
	*((uint32_t *)in) = buf_get_u32(in, 0, 32);
}
253
254 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
255 {
256 if (num_words == 0)
257 return ERROR_INVALID_ARGUMENTS;
258
259 int retval = ERROR_OK;
260 tap_state_t path[3];
261 struct scan_field fields[3];
262 uint8_t *field0 = malloc(num_words * 1);
263 uint8_t field0_check_value = 0x2;
264 uint8_t field0_check_mask = 0x6;
265 uint32_t *field1 = malloc(num_words * 4);
266 uint8_t field2_check_value = 0x0;
267 uint8_t field2_check_mask = 0x1;
268 int words_done = 0;
269 int words_scheduled = 0;
270 int i;
271
272 path[0] = TAP_DRSELECT;
273 path[1] = TAP_DRCAPTURE;
274 path[2] = TAP_DRSHIFT;
275
276 memset(&fields, 0, sizeof fields);
277
278 fields[0].tap = target->tap;
279 fields[0].num_bits = 3;
280 fields[0].check_value = &field0_check_value;
281 fields[0].check_mask = &field0_check_mask;
282
283 fields[1].tap = target->tap;
284 fields[1].num_bits = 32;
285
286 fields[2].tap = target->tap;
287 fields[2].num_bits = 1;
288 fields[2].check_value = &field2_check_value;
289 fields[2].check_mask = &field2_check_mask;
290
291 jtag_set_end_state(TAP_IDLE);
292 xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
293 jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
294
295 /* repeat until all words have been collected */
296 int attempts = 0;
297 while (words_done < num_words)
298 {
299 /* schedule reads */
300 words_scheduled = 0;
301 for (i = words_done; i < num_words; i++)
302 {
303 fields[0].in_value = &field0[i];
304
305 jtag_add_pathmove(3, path);
306
307 fields[1].in_value = (uint8_t *)(field1 + i);
308
309 jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
310
311 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
312
313 words_scheduled++;
314 }
315
316 if ((retval = jtag_execute_queue()) != ERROR_OK)
317 {
318 LOG_ERROR("JTAG error while receiving data from debug handler");
319 break;
320 }
321
322 /* examine results */
323 for (i = words_done; i < num_words; i++)
324 {
325 if (!(field0[0] & 1))
326 {
327 /* move backwards if necessary */
328 int j;
329 for (j = i; j < num_words - 1; j++)
330 {
331 field0[j] = field0[j + 1];
332 field1[j] = field1[j + 1];
333 }
334 words_scheduled--;
335 }
336 }
337 if (words_scheduled == 0)
338 {
339 if (attempts++==1000)
340 {
341 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
342 retval = ERROR_TARGET_TIMEOUT;
343 break;
344 }
345 }
346
347 words_done += words_scheduled;
348 }
349
350 for (i = 0; i < num_words; i++)
351 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
352
353 free(field1);
354
355 return retval;
356 }
357
/* Read the debug handler's TX register, waiting up to one second for
 * valid data; the 32-bit payload lands in the cached XSCALE_TX value.
 *
 * @param consume non-zero to clear TX_READY (go straight from
 *	Capture-DR to Shift-DR); zero to peek without consuming (detour
 *	through Exit1-DR/Pause-DR so the handler still sees TX as unread).
 *
 * Returns ERROR_OK; ERROR_TARGET_TIMEOUT on JTAG error or timeout; or
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when TX held no valid data
 * (only reachable in the non-consuming mode, since a consuming read
 * keeps polling until data arrives or the timeout hits).
 */
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);

	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* one-second deadline for the polling loop below */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		/* leave the loop unless we are consuming and TX (status bit 0)
		 * is still invalid */
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
455
/* Write the cached XSCALE_RX value to the debug handler's RX register
 * over the DBGRX scan chain.
 *
 * First polls (up to one second) until the captured status bit 0 goes
 * low, i.e. the handler has drained any previous RX word; then performs
 * one more scan with the rx_valid bit set to commit the new value.
 *
 * Returns ERROR_OK, a JTAG error code, or ERROR_TARGET_TIMEOUT.
 */
static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	/* one-second deadline for the polling loop below */
	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
540
541 /* send count elements of size byte to the debug handler */
/* Send @a count elements of @a size bytes (4, 2, or 1) from @a buffer
 * to the debug handler's RX register via DBGRX, converting each
 * element from the target's endianness to a host value first.
 *
 * The entire burst is queued with jtag_add_dr_out() and flushed by one
 * jtag_execute_queue().  Scan layout: 3 control bits (zero), 32 data
 * bits, 1 rx_valid bit (always set).
 * NOTE(review): unlike xscale_write_rx() there is no per-word rx-ready
 * handshake here -- presumably the handler drains RX faster than JTAG
 * can refill it; confirm before reusing this path for other traffic.
 *
 * Returns ERROR_OK, a JTAG error, or ERROR_INVALID_ARGUMENTS for an
 * unsupported element size.
 */
static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
{
	uint32_t t[3];
	int bits[3];
	int retval;
	int done_count = 0;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);

	bits[0]=3;
	t[0]=0;
	bits[1]=32;
	t[2]=1;
	bits[2]=1;
	int endianness = target->endianness;
	while (done_count++ < count)
	{
		switch (size)
		{
		case 4:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u32(buffer);
			} else
			{
				t[1]=be_to_h_u32(buffer);
			}
			break;
		case 2:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u16(buffer);
			} else
			{
				t[1]=be_to_h_u16(buffer);
			}
			break;
		case 1:
			t[1]=buffer[0];
			break;
		default:
			LOG_ERROR("BUG: size neither 4, 2 nor 1");
			return ERROR_INVALID_ARGUMENTS;
		}
		jtag_add_dr_out(target->tap,
				3,
				bits,
				t,
				jtag_set_end_state(TAP_IDLE));
		buffer += size;
	}

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
604
605 static int xscale_send_u32(struct target *target, uint32_t value)
606 {
607 struct xscale_common *xscale = target_to_xscale(target);
608
609 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
610 return xscale_write_rx(target);
611 }
612
/* Write the cached DCSR value to the target over the SELDCSR chain.
 *
 * @param hold_rst    new hold_rst flag, or -1 to keep the current one
 * @param ext_dbg_brk new external_debug_break flag, or -1 to keep it
 *
 * Both flags are shifted out in bits 1 and 2 of the 3-bit control
 * field alongside the 32 DCSR bits; the check value/mask pairs verify
 * the status bits the TAP captures back.  Marks the cached DCSR clean
 * on success.  Returns ERROR_OK or a JTAG error code.
 */
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);

	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* hardware and cache agree again */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
671
/* Parity of a 32-bit word: returns 0 when the number of set bits is
 * even, 1 when it is odd.  XOR-folds the word down to a single bit
 * instead of the nibble-lookup (0x6996) variant.
 */
static unsigned int parity (unsigned int v)
{
	unsigned int folded = v ^ (v >> 16);

	folded ^= folded >> 8;
	folded ^= folded >> 4;
	folded ^= folded >> 2;
	folded ^= folded >> 1;
	return folded & 1;
}
683
/* Load one 8-word line into the mini instruction cache via the LDIC
 * JTAG instruction.
 *
 * @param va     virtual address of the cache line; only va >> 5
 *               (27 bits) is transmitted
 * @param buffer the eight 32-bit instruction words of the line
 *
 * Sends the "load mini IC" command packet first, then one DR scan of
 * 32 data bits + 1 parity bit per word, and flushes everything with a
 * single jtag_execute_queue().
 */
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_LDIC);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].tap = target->tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* per-word parity bit; go through memcpy to read the packed
		 * bytes back as a host uint32_t */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(2, fields, jtag_get_end_state());
	}

	return jtag_execute_queue();
}
738
/* Queue an "invalidate IC line" LDIC command for the cache line
 * containing virtual address @a va (only va >> 5 is sent).
 *
 * NOTE(review): the DR scan is only queued -- this function returns
 * before any jtag_execute_queue(); callers must flush the queue later
 * (xscale_update_vectors() does, via xscale_load_ic()).
 */
static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
{
	uint8_t packet[4];
	uint8_t cmd;
	struct scan_field fields[2];

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_LDIC);

	/* CMD for invalidate IC line b000, bits [6:4] b000 */
	buf_set_u32(&cmd, 0, 6, 0x0);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].tap = target->tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	return ERROR_OK;
}
768
769 static int xscale_update_vectors(struct target *target)
770 {
771 struct xscale_common *xscale = target_to_xscale(target);
772 int i;
773 int retval;
774
775 uint32_t low_reset_branch, high_reset_branch;
776
777 for (i = 1; i < 8; i++)
778 {
779 /* if there's a static vector specified for this exception, override */
780 if (xscale->static_high_vectors_set & (1 << i))
781 {
782 xscale->high_vectors[i] = xscale->static_high_vectors[i];
783 }
784 else
785 {
786 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
787 if (retval == ERROR_TARGET_TIMEOUT)
788 return retval;
789 if (retval != ERROR_OK)
790 {
791 /* Some of these reads will fail as part of normal execution */
792 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
793 }
794 }
795 }
796
797 for (i = 1; i < 8; i++)
798 {
799 if (xscale->static_low_vectors_set & (1 << i))
800 {
801 xscale->low_vectors[i] = xscale->static_low_vectors[i];
802 }
803 else
804 {
805 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
806 if (retval == ERROR_TARGET_TIMEOUT)
807 return retval;
808 if (retval != ERROR_OK)
809 {
810 /* Some of these reads will fail as part of normal execution */
811 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
812 }
813 }
814 }
815
816 /* calculate branches to debug handler */
817 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
818 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
819
820 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
821 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
822
823 /* invalidate and load exception vectors in mini i-cache */
824 xscale_invalidate_ic_line(target, 0x0);
825 xscale_invalidate_ic_line(target, 0xffff0000);
826
827 xscale_load_ic(target, 0x0, xscale->low_vectors);
828 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
829
830 return ERROR_OK;
831 }
832
833 static int xscale_arch_state(struct target *target)
834 {
835 struct xscale_common *xscale = target_to_xscale(target);
836 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
837
838 static const char *state[] =
839 {
840 "disabled", "enabled"
841 };
842
843 static const char *arch_dbg_reason[] =
844 {
845 "", "\n(processor reset)", "\n(trace buffer full)"
846 };
847
848 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
849 {
850 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
851 return ERROR_INVALID_ARGUMENTS;
852 }
853
854 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
855 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
856 "MMU: %s, D-Cache: %s, I-Cache: %s"
857 "%s",
858 armv4_5_state_strings[armv4_5->core_state],
859 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
860 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
861 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
862 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
863 state[xscale->armv4_5_mmu.mmu_enabled],
864 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
865 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
866 arch_dbg_reason[xscale->arch_debug_reason]);
867
868 return ERROR_OK;
869 }
870
871 static int xscale_poll(struct target *target)
872 {
873 int retval = ERROR_OK;
874
875 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
876 {
877 enum target_state previous_state = target->state;
878 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
879 {
880
881 /* there's data to read from the tx register, we entered debug state */
882 target->state = TARGET_HALTED;
883
884 /* process debug entry, fetching current mode regs */
885 retval = xscale_debug_entry(target);
886 }
887 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
888 {
889 LOG_USER("error while polling TX register, reset CPU");
890 /* here we "lie" so GDB won't get stuck and a reset can be perfomed */
891 target->state = TARGET_HALTED;
892 }
893
894 /* debug_entry could have overwritten target state (i.e. immediate resume)
895 * don't signal event handlers in that case
896 */
897 if (target->state != TARGET_HALTED)
898 return ERROR_OK;
899
900 /* if target was running, signal that we halted
901 * otherwise we reentered from debug execution */
902 if (previous_state == TARGET_RUNNING)
903 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
904 else
905 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
906 }
907
908 return retval;
909 }
910
/* Synchronize OpenOCD state after the core enters debug state: read
 * the register snapshot the debug handler transmits, fill the register
 * caches, decode the debug reason from DCSR's method-of-entry field,
 * fix up the reported PC, and restart trace collection if one is
 * still in progress.
 *
 * Handler protocol: first r0, pc, r1-r7 and cpsr (10 words), then the
 * banked r8-r14 plus spsr (8 words) outside USR/SYS, or just r8-r14
 * (7 words) in USR/SYS.
 */
static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
	uint32_t pc;
	uint32_t buffer[10];
	int i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
	armv4_5->core_cache->reg_list[15].dirty = 1;
	armv4_5->core_cache->reg_list[15].valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache (r1..r7 follow at
	 * buffer[2..8]) */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
	armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
	armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	/* cpsr bits [4:0] hold the processor mode */
	armv4_5->core_mode = buffer[9] & 0x1f;
	if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);

	/* cpsr bit 5 (T) selects Thumb vs ARM state */
	if (buffer[9] & 0x20)
		armv4_5->core_state = ARMV4_5_STATE_THUMB;
	else
		armv4_5->core_state = ARMV4_5_STATE_ARM;


	/* NOTE(review): redundant -- the same check already returned above,
	 * so this branch can never fire */
	if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
		return ERROR_FAIL;

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
	{
		xscale_receive(target, buffer, 8);
		buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to register cache */
	for (i = 8; i <= 14; i++)
	{
		buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
	}

	/* examine debug reason: DCSR bits [4:2] are the method of entry */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	switch (moe)
	{
		case 0x0: /* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1: /* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2: /* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3: /* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4: /* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5: /* Vector trap occured */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6: /* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7: /* Reserved (may flag Hot-Debug support) */
		default:
			/* NOTE(review): exit() takes down all of OpenOCD on a
			 * reserved MOE value -- deliberate fail-fast? confirm */
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.buffer_enabled)
	{
		xscale_read_trace(target);
		xscale->trace.buffer_fill--;

		/* resume if we're still collecting trace data */
		if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
			&& (xscale->trace.buffer_fill > 0))
		{
			xscale_resume(target, 1, 0x0, 1, 0);
		}
		else
		{
			xscale->trace.buffer_enabled = 0;
		}
	}

	return ERROR_OK;
}
1090
1091 static int xscale_halt(struct target *target)
1092 {
1093 struct xscale_common *xscale = target_to_xscale(target);
1094
1095 LOG_DEBUG("target->state: %s",
1096 target_state_name(target));
1097
1098 if (target->state == TARGET_HALTED)
1099 {
1100 LOG_DEBUG("target was already halted");
1101 return ERROR_OK;
1102 }
1103 else if (target->state == TARGET_UNKNOWN)
1104 {
1105 /* this must not happen for a xscale target */
1106 LOG_ERROR("target was in unknown state when halt was requested");
1107 return ERROR_TARGET_INVALID;
1108 }
1109 else if (target->state == TARGET_RESET)
1110 {
1111 LOG_DEBUG("target->state == TARGET_RESET");
1112 }
1113 else
1114 {
1115 /* assert external dbg break */
1116 xscale->external_debug_break = 1;
1117 xscale_read_dcsr(target);
1118
1119 target->debug_reason = DBG_REASON_DBGRQ;
1120 }
1121
1122 return ERROR_OK;
1123 }
1124
1125 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1126 {
1127 struct xscale_common *xscale = target_to_xscale(target);
1128 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1129 int retval;
1130
1131 if (xscale->ibcr0_used)
1132 {
1133 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1134
1135 if (ibcr0_bp)
1136 {
1137 xscale_unset_breakpoint(target, ibcr0_bp);
1138 }
1139 else
1140 {
1141 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1142 exit(-1);
1143 }
1144 }
1145
1146 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1147 return retval;
1148
1149 return ERROR_OK;
1150 }
1151
1152 static int xscale_disable_single_step(struct target *target)
1153 {
1154 struct xscale_common *xscale = target_to_xscale(target);
1155 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1156 int retval;
1157
1158 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1159 return retval;
1160
1161 return ERROR_OK;
1162 }
1163
1164 static void xscale_enable_watchpoints(struct target *target)
1165 {
1166 struct watchpoint *watchpoint = target->watchpoints;
1167
1168 while (watchpoint)
1169 {
1170 if (watchpoint->set == 0)
1171 xscale_set_watchpoint(target, watchpoint);
1172 watchpoint = watchpoint->next;
1173 }
1174 }
1175
1176 static void xscale_enable_breakpoints(struct target *target)
1177 {
1178 struct breakpoint *breakpoint = target->breakpoints;
1179
1180 /* set any pending breakpoints */
1181 while (breakpoint)
1182 {
1183 if (breakpoint->set == 0)
1184 xscale_set_breakpoint(target, breakpoint);
1185 breakpoint = breakpoint->next;
1186 }
1187 }
1188
/* Resume execution, either at the current PC or at 'address'.
 *
 * If a breakpoint sits at the resume PC (and handle_breakpoints is set),
 * it is first stepped over using the single-step hardware, then re-armed.
 * The resume itself is performed by handing the full register state back
 * to the on-chip debug handler: command 0x30 (resume) or 0x31 (resume
 * with trace), preceded by 0x62 (clean trace buffer) when tracing.
 *
 * current            1: continue at current PC, 0: continue at 'address'
 * handle_breakpoints step over a breakpoint at the resume PC first
 * debug_execution    resume without entering the normal RUNNING state
 */
static int xscale_resume(struct target *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
	struct breakpoint *breakpoint = target->breakpoints;
	uint32_t current_pc;
	int retval;
	int i;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution)
	{
		target_free_all_working_areas(target);
	}

	/* update vector tables */
	if ((retval = xscale_update_vectors(target)) != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0)
	{
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
		{
			uint32_t next_pc;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
			{
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
				/* NOTE(review): execution continues here with next_pc
				 * uninitialized, so the IBCR0 value below is garbage
				 * on this path — TODO confirm intended behavior */
			}

			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			xscale_restore_context(target);

			/* send resume request (command 0x30 or 0x31)
			 * clean the trace buffer if it is to be enabled (0x62) */
			if (xscale->trace.buffer_enabled)
			{
				xscale_send_u32(target, 0x62);
				xscale_send_u32(target, 0x31);
			}
			else
				xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));

			/* send r0..r7 in descending order (protocol expects r7 first) */
			for (i = 7; i >= 0; i--)
			{
				/* send register */
				xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

			/* wait for and process debug entry (completes the single step) */
			xscale_debug_entry(target);

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			/* re-arm the breakpoint we stepped over */
			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	xscale_restore_context(target);

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	}
	else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));

	/* send r0..r7 in descending order (protocol expects r7 first) */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
	LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution)
	{
		/* registers are now invalid */
		armv4_5_invalidate_core_regs(target);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}
1341
/* Perform the actual single step: compute the next PC on the host,
 * arm the IBCR0 single-step breakpoint there, resume via the debug
 * handler protocol, and wait for the debug re-entry.
 *
 * The 'current', 'address' and 'handle_breakpoints' parameters are
 * accepted for interface symmetry with xscale_step() but are not used
 * here — PC adjustment and breakpoint handling happen in the caller.
 */
static int xscale_step_inner(struct target *target, int current,
		uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
	{
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
		return retval;

	/* restore banked registers */
	if ((retval = xscale_restore_context(target)) != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
			return retval;
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
			return retval;
	}
	else
	if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
		return retval;

	/* send CPSR */
	if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32))) != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));

	/* send r0..r7 in descending order (protocol expects r7 first) */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
		return retval;
	LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	if ((retval = armv4_5_invalidate_core_regs(target)) != ERROR_OK)
		return retval;

	/* wait for and process debug entry */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
1421
1422 static int xscale_step(struct target *target, int current,
1423 uint32_t address, int handle_breakpoints)
1424 {
1425 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1426 struct breakpoint *breakpoint = target->breakpoints;
1427
1428 uint32_t current_pc;
1429 int retval;
1430
1431 if (target->state != TARGET_HALTED)
1432 {
1433 LOG_WARNING("target not halted");
1434 return ERROR_TARGET_NOT_HALTED;
1435 }
1436
1437 /* current = 1: continue on current pc, otherwise continue at <address> */
1438 if (!current)
1439 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1440
1441 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1442
1443 /* if we're at the reset vector, we have to simulate the step */
1444 if (current_pc == 0x0)
1445 {
1446 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1447 return retval;
1448 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1449
1450 target->debug_reason = DBG_REASON_SINGLESTEP;
1451 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1452
1453 return ERROR_OK;
1454 }
1455
1456 /* the front-end may request us not to handle breakpoints */
1457 if (handle_breakpoints)
1458 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1459 {
1460 if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
1461 return retval;
1462 }
1463
1464 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1465
1466 if (breakpoint)
1467 {
1468 xscale_set_breakpoint(target, breakpoint);
1469 }
1470
1471 LOG_DEBUG("target stepped");
1472
1473 return ERROR_OK;
1474
1475 }
1476
/* Assert SRST with the XScale debug unit primed to trap the reset
 * vector, so the core halts into the debug handler once reset is
 * released (see xscale_deassert_reset).  The DCSR write ordering and
 * the BYPASS selection before SRST are required for correct operation.
 */
static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		  target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, 0x7f);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	/* if reset-halt was requested, halt now so the core stops at the
	 * reset vector when reset is deasserted */
	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1517
/* Release SRST and bring the core up through the debug handler.
 *
 * All hardware breakpoint/watchpoint bookkeeping is cleared (the reset
 * wiped the comparators), the debug handler binary is loaded into the
 * mini-icache along with the low/high exception vectors, and the core
 * is released; it traps into the handler via the reset vector.  If
 * reset_halt was not requested, the core is immediately resumed again.
 */
static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* reset cleared the comparators: both IBCRs and DBRs are free again */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	armv4_5_invalidate_core_regs(target);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		/* stream the handler image 32 bytes (one cache line) at a time */
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt)
		{
			uint32_t cache_line[8];
			unsigned i;

			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			for (; i < 32; i += 4)
			{
				/* pad partial cache lines with 0xe1a08008 ("mov r8, r8" NOP) */
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		};

		/* load the exception vector stubs at 0x0 and 0xffff0000 */
		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
1651
/* Single-register read is not implemented for XScale; registers are
 * transferred in bulk by xscale_full_context().
 * NOTE(review): returns ERROR_OK despite doing nothing, so callers
 * cannot detect the missing implementation — confirm whether an error
 * code would be more appropriate.
 */
static int xscale_read_core_reg(struct target *target, int num,
		enum armv4_5_mode mode)
{
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1658
/* Single-register write is not implemented for XScale; registers are
 * transferred in bulk by xscale_restore_context().
 * NOTE(review): returns ERROR_OK despite doing nothing — confirm
 * whether an error code would be more appropriate.
 */
static int xscale_write_core_reg(struct target *target, int num,
		enum armv4_5_mode mode, uint32_t value)
{
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
1665
1666 static int xscale_full_context(struct target *target)
1667 {
1668 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1669
1670 uint32_t *buffer;
1671
1672 int i, j;
1673
1674 LOG_DEBUG("-");
1675
1676 if (target->state != TARGET_HALTED)
1677 {
1678 LOG_WARNING("target not halted");
1679 return ERROR_TARGET_NOT_HALTED;
1680 }
1681
1682 buffer = malloc(4 * 8);
1683
1684 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1685 * we can't enter User mode on an XScale (unpredictable),
1686 * but User shares registers with SYS
1687 */
1688 for (i = 1; i < 7; i++)
1689 {
1690 int valid = 1;
1691
1692 /* check if there are invalid registers in the current mode
1693 */
1694 for (j = 0; j <= 16; j++)
1695 {
1696 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1697 valid = 0;
1698 }
1699
1700 if (!valid)
1701 {
1702 uint32_t tmp_cpsr;
1703
1704 /* request banked registers */
1705 xscale_send_u32(target, 0x0);
1706
1707 tmp_cpsr = 0x0;
1708 tmp_cpsr |= armv4_5_number_to_mode(i);
1709 tmp_cpsr |= 0xc0; /* I/F bits */
1710
1711 /* send CPSR for desired mode */
1712 xscale_send_u32(target, tmp_cpsr);
1713
1714 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1715 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1716 {
1717 xscale_receive(target, buffer, 8);
1718 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1719 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1720 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1721 }
1722 else
1723 {
1724 xscale_receive(target, buffer, 7);
1725 }
1726
1727 /* move data from buffer to register cache */
1728 for (j = 8; j <= 14; j++)
1729 {
1730 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1731 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1732 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1733 }
1734 }
1735 }
1736
1737 free(buffer);
1738
1739 return ERROR_OK;
1740 }
1741
1742 static int xscale_restore_context(struct target *target)
1743 {
1744 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1745
1746 int i, j;
1747
1748 if (target->state != TARGET_HALTED)
1749 {
1750 LOG_WARNING("target not halted");
1751 return ERROR_TARGET_NOT_HALTED;
1752 }
1753
1754 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1755 * we can't enter User mode on an XScale (unpredictable),
1756 * but User shares registers with SYS
1757 */
1758 for (i = 1; i < 7; i++)
1759 {
1760 int dirty = 0;
1761
1762 /* check if there are invalid registers in the current mode
1763 */
1764 for (j = 8; j <= 14; j++)
1765 {
1766 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1767 dirty = 1;
1768 }
1769
1770 /* if not USR/SYS, check if the SPSR needs to be written */
1771 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1772 {
1773 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1774 dirty = 1;
1775 }
1776
1777 if (dirty)
1778 {
1779 uint32_t tmp_cpsr;
1780
1781 /* send banked registers */
1782 xscale_send_u32(target, 0x1);
1783
1784 tmp_cpsr = 0x0;
1785 tmp_cpsr |= armv4_5_number_to_mode(i);
1786 tmp_cpsr |= 0xc0; /* I/F bits */
1787
1788 /* send CPSR for desired mode */
1789 xscale_send_u32(target, tmp_cpsr);
1790
1791 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1792 for (j = 8; j <= 14; j++)
1793 {
1794 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, j).value, 0, 32));
1795 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1796 }
1797
1798 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1799 {
1800 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32));
1801 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1802 }
1803 }
1804 }
1805
1806 return ERROR_OK;
1807 }
1808
1809 static int xscale_read_memory(struct target *target, uint32_t address,
1810 uint32_t size, uint32_t count, uint8_t *buffer)
1811 {
1812 struct xscale_common *xscale = target_to_xscale(target);
1813 uint32_t *buf32;
1814 uint32_t i;
1815 int retval;
1816
1817 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1818
1819 if (target->state != TARGET_HALTED)
1820 {
1821 LOG_WARNING("target not halted");
1822 return ERROR_TARGET_NOT_HALTED;
1823 }
1824
1825 /* sanitize arguments */
1826 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1827 return ERROR_INVALID_ARGUMENTS;
1828
1829 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1830 return ERROR_TARGET_UNALIGNED_ACCESS;
1831
1832 /* send memory read request (command 0x1n, n: access size) */
1833 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1834 return retval;
1835
1836 /* send base address for read request */
1837 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1838 return retval;
1839
1840 /* send number of requested data words */
1841 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1842 return retval;
1843
1844 /* receive data from target (count times 32-bit words in host endianness) */
1845 buf32 = malloc(4 * count);
1846 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1847 return retval;
1848
1849 /* extract data from host-endian buffer into byte stream */
1850 for (i = 0; i < count; i++)
1851 {
1852 switch (size)
1853 {
1854 case 4:
1855 target_buffer_set_u32(target, buffer, buf32[i]);
1856 buffer += 4;
1857 break;
1858 case 2:
1859 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1860 buffer += 2;
1861 break;
1862 case 1:
1863 *buffer++ = buf32[i] & 0xff;
1864 break;
1865 default:
1866 LOG_ERROR("invalid read size");
1867 return ERROR_INVALID_ARGUMENTS;
1868 }
1869 }
1870
1871 free(buf32);
1872
1873 /* examine DCSR, to see if Sticky Abort (SA) got set */
1874 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1875 return retval;
1876 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1877 {
1878 /* clear SA bit */
1879 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1880 return retval;
1881
1882 return ERROR_TARGET_DATA_ABORT;
1883 }
1884
1885 return ERROR_OK;
1886 }
1887
1888 static int xscale_write_memory(struct target *target, uint32_t address,
1889 uint32_t size, uint32_t count, uint8_t *buffer)
1890 {
1891 struct xscale_common *xscale = target_to_xscale(target);
1892 int retval;
1893
1894 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1895
1896 if (target->state != TARGET_HALTED)
1897 {
1898 LOG_WARNING("target not halted");
1899 return ERROR_TARGET_NOT_HALTED;
1900 }
1901
1902 /* sanitize arguments */
1903 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1904 return ERROR_INVALID_ARGUMENTS;
1905
1906 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1907 return ERROR_TARGET_UNALIGNED_ACCESS;
1908
1909 /* send memory write request (command 0x2n, n: access size) */
1910 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1911 return retval;
1912
1913 /* send base address for read request */
1914 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1915 return retval;
1916
1917 /* send number of requested data words to be written*/
1918 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1919 return retval;
1920
1921 /* extract data from host-endian buffer into byte stream */
1922 #if 0
1923 for (i = 0; i < count; i++)
1924 {
1925 switch (size)
1926 {
1927 case 4:
1928 value = target_buffer_get_u32(target, buffer);
1929 xscale_send_u32(target, value);
1930 buffer += 4;
1931 break;
1932 case 2:
1933 value = target_buffer_get_u16(target, buffer);
1934 xscale_send_u32(target, value);
1935 buffer += 2;
1936 break;
1937 case 1:
1938 value = *buffer;
1939 xscale_send_u32(target, value);
1940 buffer += 1;
1941 break;
1942 default:
1943 LOG_ERROR("should never get here");
1944 exit(-1);
1945 }
1946 }
1947 #endif
1948 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1949 return retval;
1950
1951 /* examine DCSR, to see if Sticky Abort (SA) got set */
1952 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1953 return retval;
1954 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1955 {
1956 /* clear SA bit */
1957 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1958 return retval;
1959
1960 return ERROR_TARGET_DATA_ABORT;
1961 }
1962
1963 return ERROR_OK;
1964 }
1965
/* Bulk writes are not accelerated on XScale; delegate to the normal
 * word-sized write path through the debug handler. */
static int xscale_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}
1971
1972 static uint32_t xscale_get_ttb(struct target *target)
1973 {
1974 struct xscale_common *xscale = target_to_xscale(target);
1975 uint32_t ttb;
1976
1977 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1978 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1979
1980 return ttb;
1981 }
1982
1983 static void xscale_disable_mmu_caches(struct target *target, int mmu,
1984 int d_u_cache, int i_cache)
1985 {
1986 struct xscale_common *xscale = target_to_xscale(target);
1987 uint32_t cp15_control;
1988
1989 /* read cp15 control register */
1990 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1991 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1992
1993 if (mmu)
1994 cp15_control &= ~0x1U;
1995
1996 if (d_u_cache)
1997 {
1998 /* clean DCache */
1999 xscale_send_u32(target, 0x50);
2000 xscale_send_u32(target, xscale->cache_clean_address);
2001
2002 /* invalidate DCache */
2003 xscale_send_u32(target, 0x51);
2004
2005 cp15_control &= ~0x4U;
2006 }
2007
2008 if (i_cache)
2009 {
2010 /* invalidate ICache */
2011 xscale_send_u32(target, 0x52);
2012 cp15_control &= ~0x1000U;
2013 }
2014
2015 /* write new cp15 control register */
2016 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2017
2018 /* execute cpwait to ensure outstanding operations complete */
2019 xscale_send_u32(target, 0x53);
2020 }
2021
2022 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2023 int d_u_cache, int i_cache)
2024 {
2025 struct xscale_common *xscale = target_to_xscale(target);
2026 uint32_t cp15_control;
2027
2028 /* read cp15 control register */
2029 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2030 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2031
2032 if (mmu)
2033 cp15_control |= 0x1U;
2034
2035 if (d_u_cache)
2036 cp15_control |= 0x4U;
2037
2038 if (i_cache)
2039 cp15_control |= 0x1000U;
2040
2041 /* write new cp15 control register */
2042 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2043
2044 /* execute cpwait to ensure outstanding operations complete */
2045 xscale_send_u32(target, 0x53);
2046 }
2047
2048 static int xscale_set_breakpoint(struct target *target,
2049 struct breakpoint *breakpoint)
2050 {
2051 int retval;
2052 struct xscale_common *xscale = target_to_xscale(target);
2053
2054 if (target->state != TARGET_HALTED)
2055 {
2056 LOG_WARNING("target not halted");
2057 return ERROR_TARGET_NOT_HALTED;
2058 }
2059
2060 if (breakpoint->set)
2061 {
2062 LOG_WARNING("breakpoint already set");
2063 return ERROR_OK;
2064 }
2065
2066 if (breakpoint->type == BKPT_HARD)
2067 {
2068 uint32_t value = breakpoint->address | 1;
2069 if (!xscale->ibcr0_used)
2070 {
2071 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2072 xscale->ibcr0_used = 1;
2073 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2074 }
2075 else if (!xscale->ibcr1_used)
2076 {
2077 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2078 xscale->ibcr1_used = 1;
2079 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2080 }
2081 else
2082 {
2083 LOG_ERROR("BUG: no hardware comparator available");
2084 return ERROR_OK;
2085 }
2086 }
2087 else if (breakpoint->type == BKPT_SOFT)
2088 {
2089 if (breakpoint->length == 4)
2090 {
2091 /* keep the original instruction in target endianness */
2092 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2093 {
2094 return retval;
2095 }
2096 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2097 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2098 {
2099 return retval;
2100 }
2101 }
2102 else
2103 {
2104 /* keep the original instruction in target endianness */
2105 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2106 {
2107 return retval;
2108 }
2109 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2110 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2111 {
2112 return retval;
2113 }
2114 }
2115 breakpoint->set = 1;
2116 }
2117
2118 return ERROR_OK;
2119 }
2120
2121 static int xscale_add_breakpoint(struct target *target,
2122 struct breakpoint *breakpoint)
2123 {
2124 struct xscale_common *xscale = target_to_xscale(target);
2125
2126 if (target->state != TARGET_HALTED)
2127 {
2128 LOG_WARNING("target not halted");
2129 return ERROR_TARGET_NOT_HALTED;
2130 }
2131
2132 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2133 {
2134 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2135 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2136 }
2137
2138 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2139 {
2140 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2141 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2142 }
2143
2144 if (breakpoint->type == BKPT_HARD)
2145 {
2146 xscale->ibcr_available--;
2147 }
2148
2149 return ERROR_OK;
2150 }
2151
2152 static int xscale_unset_breakpoint(struct target *target,
2153 struct breakpoint *breakpoint)
2154 {
2155 int retval;
2156 struct xscale_common *xscale = target_to_xscale(target);
2157
2158 if (target->state != TARGET_HALTED)
2159 {
2160 LOG_WARNING("target not halted");
2161 return ERROR_TARGET_NOT_HALTED;
2162 }
2163
2164 if (!breakpoint->set)
2165 {
2166 LOG_WARNING("breakpoint not set");
2167 return ERROR_OK;
2168 }
2169
2170 if (breakpoint->type == BKPT_HARD)
2171 {
2172 if (breakpoint->set == 1)
2173 {
2174 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2175 xscale->ibcr0_used = 0;
2176 }
2177 else if (breakpoint->set == 2)
2178 {
2179 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2180 xscale->ibcr1_used = 0;
2181 }
2182 breakpoint->set = 0;
2183 }
2184 else
2185 {
2186 /* restore original instruction (kept in target endianness) */
2187 if (breakpoint->length == 4)
2188 {
2189 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2190 {
2191 return retval;
2192 }
2193 }
2194 else
2195 {
2196 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2197 {
2198 return retval;
2199 }
2200 }
2201 breakpoint->set = 0;
2202 }
2203
2204 return ERROR_OK;
2205 }
2206
2207 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2208 {
2209 struct xscale_common *xscale = target_to_xscale(target);
2210
2211 if (target->state != TARGET_HALTED)
2212 {
2213 LOG_WARNING("target not halted");
2214 return ERROR_TARGET_NOT_HALTED;
2215 }
2216
2217 if (breakpoint->set)
2218 {
2219 xscale_unset_breakpoint(target, breakpoint);
2220 }
2221
2222 if (breakpoint->type == BKPT_HARD)
2223 xscale->ibcr_available++;
2224
2225 return ERROR_OK;
2226 }
2227
2228 static int xscale_set_watchpoint(struct target *target,
2229 struct watchpoint *watchpoint)
2230 {
2231 struct xscale_common *xscale = target_to_xscale(target);
2232 uint8_t enable = 0;
2233 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2234 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2235
2236 if (target->state != TARGET_HALTED)
2237 {
2238 LOG_WARNING("target not halted");
2239 return ERROR_TARGET_NOT_HALTED;
2240 }
2241
2242 xscale_get_reg(dbcon);
2243
2244 switch (watchpoint->rw)
2245 {
2246 case WPT_READ:
2247 enable = 0x3;
2248 break;
2249 case WPT_ACCESS:
2250 enable = 0x2;
2251 break;
2252 case WPT_WRITE:
2253 enable = 0x1;
2254 break;
2255 default:
2256 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2257 }
2258
2259 if (!xscale->dbr0_used)
2260 {
2261 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2262 dbcon_value |= enable;
2263 xscale_set_reg_u32(dbcon, dbcon_value);
2264 watchpoint->set = 1;
2265 xscale->dbr0_used = 1;
2266 }
2267 else if (!xscale->dbr1_used)
2268 {
2269 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2270 dbcon_value |= enable << 2;
2271 xscale_set_reg_u32(dbcon, dbcon_value);
2272 watchpoint->set = 2;
2273 xscale->dbr1_used = 1;
2274 }
2275 else
2276 {
2277 LOG_ERROR("BUG: no hardware comparator available");
2278 return ERROR_OK;
2279 }
2280
2281 return ERROR_OK;
2282 }
2283
2284 static int xscale_add_watchpoint(struct target *target,
2285 struct watchpoint *watchpoint)
2286 {
2287 struct xscale_common *xscale = target_to_xscale(target);
2288
2289 if (target->state != TARGET_HALTED)
2290 {
2291 LOG_WARNING("target not halted");
2292 return ERROR_TARGET_NOT_HALTED;
2293 }
2294
2295 if (xscale->dbr_available < 1)
2296 {
2297 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2298 }
2299
2300 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2301 {
2302 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2303 }
2304
2305 xscale->dbr_available--;
2306
2307 return ERROR_OK;
2308 }
2309
2310 static int xscale_unset_watchpoint(struct target *target,
2311 struct watchpoint *watchpoint)
2312 {
2313 struct xscale_common *xscale = target_to_xscale(target);
2314 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2315 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2316
2317 if (target->state != TARGET_HALTED)
2318 {
2319 LOG_WARNING("target not halted");
2320 return ERROR_TARGET_NOT_HALTED;
2321 }
2322
2323 if (!watchpoint->set)
2324 {
2325 LOG_WARNING("breakpoint not set");
2326 return ERROR_OK;
2327 }
2328
2329 if (watchpoint->set == 1)
2330 {
2331 dbcon_value &= ~0x3;
2332 xscale_set_reg_u32(dbcon, dbcon_value);
2333 xscale->dbr0_used = 0;
2334 }
2335 else if (watchpoint->set == 2)
2336 {
2337 dbcon_value &= ~0xc;
2338 xscale_set_reg_u32(dbcon, dbcon_value);
2339 xscale->dbr1_used = 0;
2340 }
2341 watchpoint->set = 0;
2342
2343 return ERROR_OK;
2344 }
2345
2346 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2347 {
2348 struct xscale_common *xscale = target_to_xscale(target);
2349
2350 if (target->state != TARGET_HALTED)
2351 {
2352 LOG_WARNING("target not halted");
2353 return ERROR_TARGET_NOT_HALTED;
2354 }
2355
2356 if (watchpoint->set)
2357 {
2358 xscale_unset_watchpoint(target, watchpoint);
2359 }
2360
2361 xscale->dbr_available++;
2362
2363 return ERROR_OK;
2364 }
2365
2366 static int xscale_get_reg(struct reg *reg)
2367 {
2368 struct xscale_reg *arch_info = reg->arch_info;
2369 struct target *target = arch_info->target;
2370 struct xscale_common *xscale = target_to_xscale(target);
2371
2372 /* DCSR, TX and RX are accessible via JTAG */
2373 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2374 {
2375 return xscale_read_dcsr(arch_info->target);
2376 }
2377 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2378 {
2379 /* 1 = consume register content */
2380 return xscale_read_tx(arch_info->target, 1);
2381 }
2382 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2383 {
2384 /* can't read from RX register (host -> debug handler) */
2385 return ERROR_OK;
2386 }
2387 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2388 {
2389 /* can't (explicitly) read from TXRXCTRL register */
2390 return ERROR_OK;
2391 }
2392 else /* Other DBG registers have to be transfered by the debug handler */
2393 {
2394 /* send CP read request (command 0x40) */
2395 xscale_send_u32(target, 0x40);
2396
2397 /* send CP register number */
2398 xscale_send_u32(target, arch_info->dbg_handler_number);
2399
2400 /* read register value */
2401 xscale_read_tx(target, 1);
2402 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2403
2404 reg->dirty = 0;
2405 reg->valid = 1;
2406 }
2407
2408 return ERROR_OK;
2409 }
2410
2411 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2412 {
2413 struct xscale_reg *arch_info = reg->arch_info;
2414 struct target *target = arch_info->target;
2415 struct xscale_common *xscale = target_to_xscale(target);
2416 uint32_t value = buf_get_u32(buf, 0, 32);
2417
2418 /* DCSR, TX and RX are accessible via JTAG */
2419 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2420 {
2421 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2422 return xscale_write_dcsr(arch_info->target, -1, -1);
2423 }
2424 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2425 {
2426 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2427 return xscale_write_rx(arch_info->target);
2428 }
2429 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2430 {
2431 /* can't write to TX register (debug-handler -> host) */
2432 return ERROR_OK;
2433 }
2434 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2435 {
2436 /* can't (explicitly) write to TXRXCTRL register */
2437 return ERROR_OK;
2438 }
2439 else /* Other DBG registers have to be transfered by the debug handler */
2440 {
2441 /* send CP write request (command 0x41) */
2442 xscale_send_u32(target, 0x41);
2443
2444 /* send CP register number */
2445 xscale_send_u32(target, arch_info->dbg_handler_number);
2446
2447 /* send CP register value */
2448 xscale_send_u32(target, value);
2449 buf_set_u32(reg->value, 0, 32, value);
2450 }
2451
2452 return ERROR_OK;
2453 }
2454
2455 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2456 {
2457 struct xscale_common *xscale = target_to_xscale(target);
2458 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2459 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2460
2461 /* send CP write request (command 0x41) */
2462 xscale_send_u32(target, 0x41);
2463
2464 /* send CP register number */
2465 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2466
2467 /* send CP register value */
2468 xscale_send_u32(target, value);
2469 buf_set_u32(dcsr->value, 0, 32, value);
2470
2471 return ERROR_OK;
2472 }
2473
2474 static int xscale_read_trace(struct target *target)
2475 {
2476 struct xscale_common *xscale = target_to_xscale(target);
2477 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2478 struct xscale_trace_data **trace_data_p;
2479
2480 /* 258 words from debug handler
2481 * 256 trace buffer entries
2482 * 2 checkpoint addresses
2483 */
2484 uint32_t trace_buffer[258];
2485 int is_address[256];
2486 int i, j;
2487
2488 if (target->state != TARGET_HALTED)
2489 {
2490 LOG_WARNING("target must be stopped to read trace data");
2491 return ERROR_TARGET_NOT_HALTED;
2492 }
2493
2494 /* send read trace buffer command (command 0x61) */
2495 xscale_send_u32(target, 0x61);
2496
2497 /* receive trace buffer content */
2498 xscale_receive(target, trace_buffer, 258);
2499
2500 /* parse buffer backwards to identify address entries */
2501 for (i = 255; i >= 0; i--)
2502 {
2503 is_address[i] = 0;
2504 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2505 ((trace_buffer[i] & 0xf0) == 0xd0))
2506 {
2507 if (i >= 3)
2508 is_address[--i] = 1;
2509 if (i >= 2)
2510 is_address[--i] = 1;
2511 if (i >= 1)
2512 is_address[--i] = 1;
2513 if (i >= 0)
2514 is_address[--i] = 1;
2515 }
2516 }
2517
2518
2519 /* search first non-zero entry */
2520 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2521 ;
2522
2523 if (j == 256)
2524 {
2525 LOG_DEBUG("no trace data collected");
2526 return ERROR_XSCALE_NO_TRACE_DATA;
2527 }
2528
2529 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2530 ;
2531
2532 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2533 (*trace_data_p)->next = NULL;
2534 (*trace_data_p)->chkpt0 = trace_buffer[256];
2535 (*trace_data_p)->chkpt1 = trace_buffer[257];
2536 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2537 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2538 (*trace_data_p)->depth = 256 - j;
2539
2540 for (i = j; i < 256; i++)
2541 {
2542 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2543 if (is_address[i])
2544 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2545 else
2546 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2547 }
2548
2549 return ERROR_OK;
2550 }
2551
2552 static int xscale_read_instruction(struct target *target,
2553 struct arm_instruction *instruction)
2554 {
2555 struct xscale_common *xscale = target_to_xscale(target);
2556 int i;
2557 int section = -1;
2558 size_t size_read;
2559 uint32_t opcode;
2560 int retval;
2561
2562 if (!xscale->trace.image)
2563 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2564
2565 /* search for the section the current instruction belongs to */
2566 for (i = 0; i < xscale->trace.image->num_sections; i++)
2567 {
2568 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2569 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2570 {
2571 section = i;
2572 break;
2573 }
2574 }
2575
2576 if (section == -1)
2577 {
2578 /* current instruction couldn't be found in the image */
2579 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2580 }
2581
2582 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2583 {
2584 uint8_t buf[4];
2585 if ((retval = image_read_section(xscale->trace.image, section,
2586 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2587 4, buf, &size_read)) != ERROR_OK)
2588 {
2589 LOG_ERROR("error while reading instruction: %i", retval);
2590 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2591 }
2592 opcode = target_buffer_get_u32(target, buf);
2593 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2594 }
2595 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2596 {
2597 uint8_t buf[2];
2598 if ((retval = image_read_section(xscale->trace.image, section,
2599 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2600 2, buf, &size_read)) != ERROR_OK)
2601 {
2602 LOG_ERROR("error while reading instruction: %i", retval);
2603 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2604 }
2605 opcode = target_buffer_get_u16(target, buf);
2606 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2607 }
2608 else
2609 {
2610 LOG_ERROR("BUG: unknown core state encountered");
2611 exit(-1);
2612 }
2613
2614 return ERROR_OK;
2615 }
2616
2617 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2618 int i, uint32_t *target)
2619 {
2620 /* if there are less than four entries prior to the indirect branch message
2621 * we can't extract the address */
2622 if (i < 4)
2623 {
2624 return -1;
2625 }
2626
2627 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2628 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2629
2630 return 0;
2631 }
2632
2633 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2634 {
2635 struct xscale_common *xscale = target_to_xscale(target);
2636 int next_pc_ok = 0;
2637 uint32_t next_pc = 0x0;
2638 struct xscale_trace_data *trace_data = xscale->trace.data;
2639 int retval;
2640
2641 while (trace_data)
2642 {
2643 int i, chkpt;
2644 int rollover;
2645 int branch;
2646 int exception;
2647 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2648
2649 chkpt = 0;
2650 rollover = 0;
2651
2652 for (i = 0; i < trace_data->depth; i++)
2653 {
2654 next_pc_ok = 0;
2655 branch = 0;
2656 exception = 0;
2657
2658 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2659 continue;
2660
2661 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2662 {
2663 case 0: /* Exceptions */
2664 case 1:
2665 case 2:
2666 case 3:
2667 case 4:
2668 case 5:
2669 case 6:
2670 case 7:
2671 exception = (trace_data->entries[i].data & 0x70) >> 4;
2672 next_pc_ok = 1;
2673 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2674 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2675 break;
2676 case 8: /* Direct Branch */
2677 branch = 1;
2678 break;
2679 case 9: /* Indirect Branch */
2680 branch = 1;
2681 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2682 {
2683 next_pc_ok = 1;
2684 }
2685 break;
2686 case 13: /* Checkpointed Indirect Branch */
2687 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2688 {
2689 next_pc_ok = 1;
2690 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2691 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2692 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2693 }
2694 /* explicit fall-through */
2695 case 12: /* Checkpointed Direct Branch */
2696 branch = 1;
2697 if (chkpt == 0)
2698 {
2699 next_pc_ok = 1;
2700 next_pc = trace_data->chkpt0;
2701 chkpt++;
2702 }
2703 else if (chkpt == 1)
2704 {
2705 next_pc_ok = 1;
2706 next_pc = trace_data->chkpt0;
2707 chkpt++;
2708 }
2709 else
2710 {
2711 LOG_WARNING("more than two checkpointed branches encountered");
2712 }
2713 break;
2714 case 15: /* Roll-over */
2715 rollover++;
2716 continue;
2717 default: /* Reserved */
2718 command_print(cmd_ctx, "--- reserved trace message ---");
2719 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2720 return ERROR_OK;
2721 }
2722
2723 if (xscale->trace.pc_ok)
2724 {
2725 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2726 struct arm_instruction instruction;
2727
2728 if ((exception == 6) || (exception == 7))
2729 {
2730 /* IRQ or FIQ exception, no instruction executed */
2731 executed -= 1;
2732 }
2733
2734 while (executed-- >= 0)
2735 {
2736 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2737 {
2738 /* can't continue tracing with no image available */
2739 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2740 {
2741 return retval;
2742 }
2743 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2744 {
2745 /* TODO: handle incomplete images */
2746 }
2747 }
2748
2749 /* a precise abort on a load to the PC is included in the incremental
2750 * word count, other instructions causing data aborts are not included
2751 */
2752 if ((executed == 0) && (exception == 4)
2753 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2754 {
2755 if ((instruction.type == ARM_LDM)
2756 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2757 {
2758 executed--;
2759 }
2760 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2761 && (instruction.info.load_store.Rd != 15))
2762 {
2763 executed--;
2764 }
2765 }
2766
2767 /* only the last instruction executed
2768 * (the one that caused the control flow change)
2769 * could be a taken branch
2770 */
2771 if (((executed == -1) && (branch == 1)) &&
2772 (((instruction.type == ARM_B) ||
2773 (instruction.type == ARM_BL) ||
2774 (instruction.type == ARM_BLX)) &&
2775 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2776 {
2777 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2778 }
2779 else
2780 {
2781 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2782 }
2783 command_print(cmd_ctx, "%s", instruction.text);
2784 }
2785
2786 rollover = 0;
2787 }
2788
2789 if (next_pc_ok)
2790 {
2791 xscale->trace.current_pc = next_pc;
2792 xscale->trace.pc_ok = 1;
2793 }
2794 }
2795
2796 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2797 {
2798 struct arm_instruction instruction;
2799 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2800 {
2801 /* can't continue tracing with no image available */
2802 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2803 {
2804 return retval;
2805 }
2806 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2807 {
2808 /* TODO: handle incomplete images */
2809 }
2810 }
2811 command_print(cmd_ctx, "%s", instruction.text);
2812 }
2813
2814 trace_data = trace_data->next;
2815 }
2816
2817 return ERROR_OK;
2818 }
2819
/* get/set accessors installed on every register in the XScale debug
 * register cache (see xscale_build_reg_cache()) */
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};
2824
2825 static void xscale_build_reg_cache(struct target *target)
2826 {
2827 struct xscale_common *xscale = target_to_xscale(target);
2828 struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
2829 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2830 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2831 int i;
2832 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(struct xscale_reg);
2833
2834 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2835 armv4_5->core_cache = (*cache_p);
2836
2837 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2838 cache_p = &(*cache_p)->next;
2839
2840 /* fill in values for the xscale reg cache */
2841 (*cache_p)->name = "XScale registers";
2842 (*cache_p)->next = NULL;
2843 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2844 (*cache_p)->num_regs = num_regs;
2845
2846 for (i = 0; i < num_regs; i++)
2847 {
2848 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2849 (*cache_p)->reg_list[i].value = calloc(4, 1);
2850 (*cache_p)->reg_list[i].dirty = 0;
2851 (*cache_p)->reg_list[i].valid = 0;
2852 (*cache_p)->reg_list[i].size = 32;
2853 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2854 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2855 arch_info[i] = xscale_reg_arch_info[i];
2856 arch_info[i].target = target;
2857 }
2858
2859 xscale->reg_cache = (*cache_p);
2860 }
2861
/* Target init callback: build the register caches.  The debug handler
 * itself is installed later, during reset handling. */
static int xscale_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
2868
/* Initialize the per-target XScale state: fix up the TAP IR length from
 * the variant string where needed, set up the local vector table copies
 * and the branches into the debug handler, reset the breakpoint and
 * watchpoint bookkeeping, and wire the ARMv4/5 core and MMU callbacks.
 *
 * Returns ERROR_OK (no failure paths so far).
 */
static int xscale_init_arch_info(struct target *target,
	struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	struct arm *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specfic data (none so far) */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* variant info ... */
	if (variant) {
		int ir_length = 0;

		/* PXA25x/26x parts use a 5-bit IR; PXA27x and IXP4xx use 7 bits */
		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		/* override a wrong ir_length from the config file */
		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	/* reset vectors branch into the debug handler */
	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* all other vectors branch to themselves (endless loop) by default */
	for (i = 1; i <= 7; i++)
	{
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two instruction breakpoint comparators (IBCR0/IBCR1) ... */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	/* ... and two data breakpoint comparators (DBR0/DBR1) */
	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* software breakpoint opcodes (ARM and Thumb encodings) */
	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	/* catch reset vector by default */
	xscale->vector_catch = 0x1;

	xscale->trace.capture_status = TRACE_IDLE;
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.buffer_enabled = 0;
	xscale->trace.buffer_fill = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	armv4_5_init_arch_info(target, armv4_5);

	/* hook up MMU/cache handling */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
2973
2974 static int xscale_target_create(struct target *target, Jim_Interp *interp)
2975 {
2976 struct xscale_common *xscale;
2977
2978 if (sizeof xscale_debug_handler - 1 > 0x800) {
2979 LOG_ERROR("debug_handler.bin: larger than 2kb");
2980 return ERROR_FAIL;
2981 }
2982
2983 xscale = calloc(1, sizeof(*xscale));
2984 if (!xscale)
2985 return ERROR_FAIL;
2986
2987 return xscale_init_arch_info(target, xscale, target->tap,
2988 target->variant);
2989 }
2990
2991 COMMAND_HANDLER(xscale_handle_debug_handler_command)
2992 {
2993 struct target *target = NULL;
2994 struct xscale_common *xscale;
2995 int retval;
2996 uint32_t handler_address;
2997
2998 if (CMD_ARGC < 2)
2999 {
3000 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3001 return ERROR_OK;
3002 }
3003
3004 if ((target = get_target(CMD_ARGV[0])) == NULL)
3005 {
3006 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3007 return ERROR_FAIL;
3008 }
3009
3010 xscale = target_to_xscale(target);
3011 retval = xscale_verify_pointer(cmd_ctx, xscale);
3012 if (retval != ERROR_OK)
3013 return retval;
3014
3015 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3016
3017 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3018 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3019 {
3020 xscale->handler_address = handler_address;
3021 }
3022 else
3023 {
3024 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3025 return ERROR_FAIL;
3026 }
3027
3028 return ERROR_OK;
3029 }
3030
3031 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3032 {
3033 struct target *target = NULL;
3034 struct xscale_common *xscale;
3035 int retval;
3036 uint32_t cache_clean_address;
3037
3038 if (CMD_ARGC < 2)
3039 {
3040 return ERROR_COMMAND_SYNTAX_ERROR;
3041 }
3042
3043 target = get_target(CMD_ARGV[0]);
3044 if (target == NULL)
3045 {
3046 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3047 return ERROR_FAIL;
3048 }
3049 xscale = target_to_xscale(target);
3050 retval = xscale_verify_pointer(cmd_ctx, xscale);
3051 if (retval != ERROR_OK)
3052 return retval;
3053
3054 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3055
3056 if (cache_clean_address & 0xffff)
3057 {
3058 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3059 }
3060 else
3061 {
3062 xscale->cache_clean_address = cache_clean_address;
3063 }
3064
3065 return ERROR_OK;
3066 }
3067
3068 COMMAND_HANDLER(xscale_handle_cache_info_command)
3069 {
3070 struct target *target = get_current_target(cmd_ctx);
3071 struct xscale_common *xscale = target_to_xscale(target);
3072 int retval;
3073
3074 retval = xscale_verify_pointer(cmd_ctx, xscale);
3075 if (retval != ERROR_OK)
3076 return retval;
3077
3078 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
3079 }
3080
3081 static int xscale_virt2phys(struct target *target,
3082 uint32_t virtual, uint32_t *physical)
3083 {
3084 struct xscale_common *xscale = target_to_xscale(target);
3085 int type;
3086 uint32_t cb;
3087 int domain;
3088 uint32_t ap;
3089
3090 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3091 LOG_ERROR(xscale_not);
3092 return ERROR_TARGET_INVALID;
3093 }
3094
3095 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3096 if (type == -1)
3097 {
3098 return ret;
3099 }
3100 *physical = ret;
3101 return ERROR_OK;
3102 }
3103
3104 static int xscale_mmu(struct target *target, int *enabled)
3105 {
3106 struct xscale_common *xscale = target_to_xscale(target);
3107
3108 if (target->state != TARGET_HALTED)
3109 {
3110 LOG_ERROR("Target not halted");
3111 return ERROR_TARGET_INVALID;
3112 }
3113 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3114 return ERROR_OK;
3115 }
3116
3117 COMMAND_HANDLER(xscale_handle_mmu_command)
3118 {
3119 struct target *target = get_current_target(cmd_ctx);
3120 struct xscale_common *xscale = target_to_xscale(target);
3121 int retval;
3122
3123 retval = xscale_verify_pointer(cmd_ctx, xscale);
3124 if (retval != ERROR_OK)
3125 return retval;
3126
3127 if (target->state != TARGET_HALTED)
3128 {
3129 command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
3130 return ERROR_OK;
3131 }
3132
3133 if (CMD_ARGC >= 1)
3134 {
3135 if (strcmp("enable", CMD_ARGV[0]) == 0)
3136 {
3137 xscale_enable_mmu_caches(target, 1, 0, 0);
3138 xscale->armv4_5_mmu.mmu_enabled = 1;
3139 }
3140 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3141 {
3142 xscale_disable_mmu_caches(target, 1, 0, 0);
3143 xscale->armv4_5_mmu.mmu_enabled = 0;
3144 }
3145 }
3146
3147 command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3148
3149 return ERROR_OK;
3150 }
3151
3152 COMMAND_HANDLER(xscale_handle_idcache_command)
3153 {
3154 struct target *target = get_current_target(cmd_ctx);
3155 struct xscale_common *xscale = target_to_xscale(target);
3156 int icache = 0, dcache = 0;
3157 int retval;
3158
3159 retval = xscale_verify_pointer(cmd_ctx, xscale);
3160 if (retval != ERROR_OK)
3161 return retval;
3162
3163 if (target->state != TARGET_HALTED)
3164 {
3165 command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
3166 return ERROR_OK;
3167 }
3168
3169 if (strcmp(CMD_NAME, "icache") == 0)
3170 icache = 1;
3171 else if (strcmp(CMD_NAME, "dcache") == 0)
3172 dcache = 1;
3173
3174 if (CMD_ARGC >= 1)
3175 {
3176 if (strcmp("enable", CMD_ARGV[0]) == 0)
3177 {
3178 xscale_enable_mmu_caches(target, 0, dcache, icache);
3179
3180 if (icache)
3181 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
3182 else if (dcache)
3183 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
3184 }
3185 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3186 {
3187 xscale_disable_mmu_caches(target, 0, dcache, icache);
3188
3189 if (icache)
3190 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
3191 else if (dcache)
3192 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
3193 }
3194 }
3195
3196 if (icache)
3197 command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
3198
3199 if (dcache)
3200 command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
3201
3202 return ERROR_OK;
3203 }
3204
3205 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3206 {
3207 struct target *target = get_current_target(cmd_ctx);
3208 struct xscale_common *xscale = target_to_xscale(target);
3209 int retval;
3210
3211 retval = xscale_verify_pointer(cmd_ctx, xscale);
3212 if (retval != ERROR_OK)
3213 return retval;
3214
3215 if (CMD_ARGC < 1)
3216 {
3217 command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
3218 }
3219 else
3220 {
3221 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3222 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3223 xscale_write_dcsr(target, -1, -1);
3224 }
3225
3226 command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3227
3228 return ERROR_OK;
3229 }
3230
3231
3232 COMMAND_HANDLER(xscale_handle_vector_table_command)
3233 {
3234 struct target *target = get_current_target(cmd_ctx);
3235 struct xscale_common *xscale = target_to_xscale(target);
3236 int err = 0;
3237 int retval;
3238
3239 retval = xscale_verify_pointer(cmd_ctx, xscale);
3240 if (retval != ERROR_OK)
3241 return retval;
3242
3243 if (CMD_ARGC == 0) /* print current settings */
3244 {
3245 int idx;
3246
3247 command_print(cmd_ctx, "active user-set static vectors:");
3248 for (idx = 1; idx < 8; idx++)
3249 if (xscale->static_low_vectors_set & (1 << idx))
3250 command_print(cmd_ctx, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3251 for (idx = 1; idx < 8; idx++)
3252 if (xscale->static_high_vectors_set & (1 << idx))
3253 command_print(cmd_ctx, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3254 return ERROR_OK;
3255 }
3256
3257 if (CMD_ARGC != 3)
3258 err = 1;
3259 else
3260 {
3261 int idx;
3262 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3263 uint32_t vec;
3264 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3265
3266 if (idx < 1 || idx >= 8)
3267 err = 1;
3268
3269 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3270 {
3271 xscale->static_low_vectors_set |= (1<<idx);
3272 xscale->static_low_vectors[idx] = vec;
3273 }
3274 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3275 {
3276 xscale->static_high_vectors_set |= (1<<idx);
3277 xscale->static_high_vectors[idx] = vec;
3278 }
3279 else
3280 err = 1;
3281 }
3282
3283 if (err)
3284 command_print(cmd_ctx, "usage: xscale vector_table <high|low> <index> <code>");
3285
3286 return ERROR_OK;
3287 }
3288
3289
/*
 * "xscale trace_buffer <enable|disable> ['fill' [n]|'wrap']":
 * configure the on-chip trace buffer.  Enabling discards any trace
 * data collected earlier; the fill/wrap choice is mirrored into the
 * DCSR trace-mode bits at the end.
 */
COMMAND_HANDLER(xscale_handle_trace_buffer_command)
{
	struct target *target = get_current_target(cmd_ctx);
	struct xscale_common *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
	uint32_t dcsr_value;
	int retval;

	retval = xscale_verify_pointer(cmd_ctx, xscale);
	if (retval != ERROR_OK)
		return retval;

	/* DCSR access and PC sampling below require a halted target. */
	if (target->state != TARGET_HALTED)
	{
		command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
	{
		struct xscale_trace_data *td, *next_td;
		xscale->trace.buffer_enabled = 1;

		/* free old trace data */
		td = xscale->trace.data;
		while (td)
		{
			next_td = td->next;

			if (td->entries)
				free(td->entries);
			free(td);
			td = next_td;
		}
		xscale->trace.data = NULL;
	}
	else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
	{
		xscale->trace.buffer_enabled = 0;
	}

	/* Optional second argument selects fill-once (with an optional
	 * repeat count, defaulting to 1) or wrap-around (-1) mode. */
	if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
	{
		uint32_t fill = 1;
		if (CMD_ARGC >= 3)
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
		xscale->trace.buffer_fill = fill;
	}
	else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
	{
		xscale->trace.buffer_fill = -1;
	}

	if (xscale->trace.buffer_enabled)
	{
		/* if we enable the trace buffer in fill-once
		 * mode we know the address of the first instruction */
		xscale->trace.pc_ok = 1;
		xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
	}
	else
	{
		/* otherwise the address is unknown, and we have no known good PC */
		xscale->trace.pc_ok = 0;
	}

	/* NOTE(review): the print below treats buffer_fill > 0 as "fill"
	 * while the DCSR update below treats >= 0 as fill mode, so an
	 * explicit "fill 0" prints "wrap" yet programs fill mode — confirm
	 * which condition is intended. */
	command_print(cmd_ctx, "trace buffer %s (%s)",
		(xscale->trace.buffer_enabled) ? "enabled" : "disabled",
		(xscale->trace.buffer_fill > 0) ? "fill" : "wrap");

	/* Rewrite DCSR bits 1:0 to reflect the chosen trace mode. */
	dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
	if (xscale->trace.buffer_fill >= 0)
		xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
	else
		xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);

	return ERROR_OK;
}
3368
3369 COMMAND_HANDLER(xscale_handle_trace_image_command)
3370 {
3371 struct target *target = get_current_target(cmd_ctx);
3372 struct xscale_common *xscale = target_to_xscale(target);
3373 int retval;
3374
3375 if (CMD_ARGC < 1)
3376 {
3377 command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
3378 return ERROR_OK;
3379 }
3380
3381 retval = xscale_verify_pointer(cmd_ctx, xscale);
3382 if (retval != ERROR_OK)
3383 return retval;
3384
3385 if (xscale->trace.image)
3386 {
3387 image_close(xscale->trace.image);
3388 free(xscale->trace.image);
3389 command_print(cmd_ctx, "previously loaded image found and closed");
3390 }
3391
3392 xscale->trace.image = malloc(sizeof(struct image));
3393 xscale->trace.image->base_address_set = 0;
3394 xscale->trace.image->start_address_set = 0;
3395
3396 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3397 if (CMD_ARGC >= 2)
3398 {
3399 xscale->trace.image->base_address_set = 1;
3400 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
3401 }
3402 else
3403 {
3404 xscale->trace.image->base_address_set = 0;
3405 }
3406
3407 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3408 {
3409 free(xscale->trace.image);
3410 xscale->trace.image = NULL;
3411 return ERROR_OK;
3412 }
3413
3414 return ERROR_OK;
3415 }
3416
3417 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3418 {
3419 struct target *target = get_current_target(cmd_ctx);
3420 struct xscale_common *xscale = target_to_xscale(target);
3421 struct xscale_trace_data *trace_data;
3422 struct fileio file;
3423 int retval;
3424
3425 retval = xscale_verify_pointer(cmd_ctx, xscale);
3426 if (retval != ERROR_OK)
3427 return retval;
3428
3429 if (target->state != TARGET_HALTED)
3430 {
3431 command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
3432 return ERROR_OK;
3433 }
3434
3435 if (CMD_ARGC < 1)
3436 {
3437 command_print(cmd_ctx, "usage: xscale dump_trace <file>");
3438 return ERROR_OK;
3439 }
3440
3441 trace_data = xscale->trace.data;
3442
3443 if (!trace_data)
3444 {
3445 command_print(cmd_ctx, "no trace data collected");
3446 return ERROR_OK;
3447 }
3448
3449 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3450 {
3451 return ERROR_OK;
3452 }
3453
3454 while (trace_data)
3455 {
3456 int i;
3457
3458 fileio_write_u32(&file, trace_data->chkpt0);
3459 fileio_write_u32(&file, trace_data->chkpt1);
3460 fileio_write_u32(&file, trace_data->last_instruction);
3461 fileio_write_u32(&file, trace_data->depth);
3462
3463 for (i = 0; i < trace_data->depth; i++)
3464 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3465
3466 trace_data = trace_data->next;
3467 }
3468
3469 fileio_close(&file);
3470
3471 return ERROR_OK;
3472 }
3473
3474 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3475 {
3476 struct target *target = get_current_target(cmd_ctx);
3477 struct xscale_common *xscale = target_to_xscale(target);
3478 int retval;
3479
3480 retval = xscale_verify_pointer(cmd_ctx, xscale);
3481 if (retval != ERROR_OK)
3482 return retval;
3483
3484 xscale_analyze_trace(target, cmd_ctx);
3485
3486 return ERROR_OK;
3487 }
3488
3489 COMMAND_HANDLER(xscale_handle_cp15)
3490 {
3491 struct target *target = get_current_target(cmd_ctx);
3492 struct xscale_common *xscale = target_to_xscale(target);
3493 int retval;
3494
3495 retval = xscale_verify_pointer(cmd_ctx, xscale);
3496 if (retval != ERROR_OK)
3497 return retval;
3498
3499 if (target->state != TARGET_HALTED)
3500 {
3501 command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
3502 return ERROR_OK;
3503 }
3504 uint32_t reg_no = 0;
3505 struct reg *reg = NULL;
3506 if (CMD_ARGC > 0)
3507 {
3508 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3509 /*translate from xscale cp15 register no to openocd register*/
3510 switch (reg_no)
3511 {
3512 case 0:
3513 reg_no = XSCALE_MAINID;
3514 break;
3515 case 1:
3516 reg_no = XSCALE_CTRL;
3517 break;
3518 case 2:
3519 reg_no = XSCALE_TTB;
3520 break;
3521 case 3:
3522 reg_no = XSCALE_DAC;
3523 break;
3524 case 5:
3525 reg_no = XSCALE_FSR;
3526 break;
3527 case 6:
3528 reg_no = XSCALE_FAR;
3529 break;
3530 case 13:
3531 reg_no = XSCALE_PID;
3532 break;
3533 case 15:
3534 reg_no = XSCALE_CPACCESS;
3535 break;
3536 default:
3537 command_print(cmd_ctx, "invalid register number");
3538 return ERROR_INVALID_ARGUMENTS;
3539 }
3540 reg = &xscale->reg_cache->reg_list[reg_no];
3541
3542 }
3543 if (CMD_ARGC == 1)
3544 {
3545 uint32_t value;
3546
3547 /* read cp15 control register */
3548 xscale_get_reg(reg);
3549 value = buf_get_u32(reg->value, 0, 32);
3550 command_print(cmd_ctx, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3551 }
3552 else if (CMD_ARGC == 2)
3553 {
3554 uint32_t value;
3555 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3556
3557 /* send CP write request (command 0x41) */
3558 xscale_send_u32(target, 0x41);
3559
3560 /* send CP register number */
3561 xscale_send_u32(target, reg_no);
3562
3563 /* send CP register value */
3564 xscale_send_u32(target, value);
3565
3566 /* execute cpwait to ensure outstanding operations complete */
3567 xscale_send_u32(target, 0x53);
3568 }
3569 else
3570 {
3571 command_print(cmd_ctx, "usage: cp15 [register]<, [value]>");
3572 }
3573
3574 return ERROR_OK;
3575 }
3576
3577 static int xscale_register_commands(struct command_context *cmd_ctx)
3578 {
3579 struct command *xscale_cmd;
3580
3581 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3582
3583 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
3584 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3585
3586 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3587 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3588 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3589 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3590
3591 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
3592 register_command(cmd_ctx, xscale_cmd, "vector_table", xscale_handle_vector_table_command, COMMAND_EXEC, "<high|low> <index> <code> set static code for exception handler entry");
3593
3594 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable | disable> ['fill' [n]|'wrap']");
3595
3596 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3597 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3598 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3599 COMMAND_EXEC, "load image from <file> [base address]");
3600
3601 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3602
3603 armv4_5_register_commands(cmd_ctx);
3604
3605 return ERROR_OK;
3606 }
3607
/* Target-type vtable wiring the XScale implementation into the
 * generic OpenOCD target layer. */
struct target_type xscale_target =
{
	.name = "xscale",

	/* run-state tracking */
	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	/* execution control */
	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	/* reset handling; no soft-reset-halt support */
	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	.soft_reset_halt = NULL,

	/* GDB register access comes from the generic ARMv4/5 layer */
	.get_gdb_reg_list = armv4_5_get_gdb_reg_list,

	/* memory access */
	.read_memory = xscale_read_memory,
	.write_memory = xscale_write_memory,
	.bulk_write_memory = xscale_bulk_write_memory,

	/* checksum/blank-check use the generic ARM algorithms */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* breakpoints and watchpoints */
	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	/* lifecycle and command registration */
	.register_commands = xscale_register_commands,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	/* MMU support */
	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)