1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "breakpoints.h"
31 #include "xscale.h"
32 #include "target_type.h"
33 #include "arm_jtag.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
37 #include "register.h"
38 #include "image.h"
39 #include "arm_opcodes.h"
40 #include "armv4_5.h"
41
42
43 /*
44 * Important XScale documents available as of October 2009 include:
45 *
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
50 *
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
58 *
59 * Chip-specific microarchitecture documents may also be useful.
60 */
61
62
63 /* forward declarations */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
74
75
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
78 *
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
83 */
84 static /* unsigned const char xscale_debug_handler[] = ... */
85 #include "xscale_debug.h"
86
87 static char *const xscale_reg_list[] =
88 {
89 "XSCALE_MAINID", /* 0 */
90 "XSCALE_CACHETYPE",
91 "XSCALE_CTRL",
92 "XSCALE_AUXCTRL",
93 "XSCALE_TTB",
94 "XSCALE_DAC",
95 "XSCALE_FSR",
96 "XSCALE_FAR",
97 "XSCALE_PID",
98 "XSCALE_CPACCESS",
99 "XSCALE_IBCR0", /* 10 */
100 "XSCALE_IBCR1",
101 "XSCALE_DBR0",
102 "XSCALE_DBR1",
103 "XSCALE_DBCON",
104 "XSCALE_TBREG",
105 "XSCALE_CHKPT0",
106 "XSCALE_CHKPT1",
107 "XSCALE_DCSR",
108 "XSCALE_TX",
109 "XSCALE_RX", /* 20 */
110 "XSCALE_TXRXCTRL",
111 };
112
113 static const struct xscale_reg xscale_reg_arch_info[] =
114 {
115 {XSCALE_MAINID, NULL},
116 {XSCALE_CACHETYPE, NULL},
117 {XSCALE_CTRL, NULL},
118 {XSCALE_AUXCTRL, NULL},
119 {XSCALE_TTB, NULL},
120 {XSCALE_DAC, NULL},
121 {XSCALE_FSR, NULL},
122 {XSCALE_FAR, NULL},
123 {XSCALE_PID, NULL},
124 {XSCALE_CPACCESS, NULL},
125 {XSCALE_IBCR0, NULL},
126 {XSCALE_IBCR1, NULL},
127 {XSCALE_DBR0, NULL},
128 {XSCALE_DBR1, NULL},
129 {XSCALE_DBCON, NULL},
130 {XSCALE_TBREG, NULL},
131 {XSCALE_CHKPT0, NULL},
132 {XSCALE_CHKPT1, NULL},
133 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
134 {-1, NULL}, /* TX accessed via JTAG */
135 {-1, NULL}, /* RX accessed via JTAG */
136 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
137 };
138
139 /* convenience wrapper to access XScale specific registers */
140 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
141 {
142 uint8_t buf[4];
143
144 buf_set_u32(buf, 0, 32, value);
145
146 return xscale_set_reg(reg, buf);
147 }
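/* Typical use, as later in this file when arming IBCR0 for single-step
 * (illustration only; the real call sites follow below):
 *
 *     struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
 *     retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1);
 *
 * The value is packed into a 4-byte buffer and handed to xscale_set_reg(),
 * which performs the actual register write.
 */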
148
149 static const char xscale_not[] = "target is not an XScale";
150
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
153 {
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
157 }
158 return ERROR_OK;
159 }
160
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
162 {
163 assert (tap != NULL);
164
165 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
166 {
167 struct scan_field field;
168 uint8_t scratch[4];
169
170 memset(&field, 0, sizeof field);
171 field.num_bits = tap->ir_length;
172 field.out_value = scratch;
173 buf_set_u32(scratch, 0, field.num_bits, new_instr);
174
175 jtag_add_ir_scan(tap, &field, end_state);
176 }
177
178 return ERROR_OK;
179 }
180
181 static int xscale_read_dcsr(struct target *target)
182 {
183 struct xscale_common *xscale = target_to_xscale(target);
184 int retval;
185 struct scan_field fields[3];
186 uint8_t field0 = 0x0;
187 uint8_t field0_check_value = 0x2;
188 uint8_t field0_check_mask = 0x7;
189 uint8_t field2 = 0x0;
190 uint8_t field2_check_value = 0x0;
191 uint8_t field2_check_mask = 0x1;
192
193 xscale_jtag_set_instr(target->tap,
194 XSCALE_SELDCSR << xscale->xscale_variant,
195 TAP_DRPAUSE);
196
197 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
198 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
199
200 memset(&fields, 0, sizeof fields);
201
202 fields[0].num_bits = 3;
203 fields[0].out_value = &field0;
204 uint8_t tmp;
205 fields[0].in_value = &tmp;
206
207 fields[1].num_bits = 32;
208 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
209
210 fields[2].num_bits = 1;
211 fields[2].out_value = &field2;
212 uint8_t tmp2;
213 fields[2].in_value = &tmp2;
214
215 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
216
217 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
218 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
219
220 if ((retval = jtag_execute_queue()) != ERROR_OK)
221 {
222 LOG_ERROR("JTAG error while reading DCSR");
223 return retval;
224 }
225
226 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
227 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
228
229 /* write the register with the value we just read
230 * on this second pass, only the first bit of field0 is guaranteed to be 0
231 */
232 field0_check_mask = 0x1;
233 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
234 fields[1].in_value = NULL;
235
236 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
237
238 /* DANGER!!! this must be here. It will make sure that the arguments
239 * to jtag_set_check_value() do not go out of scope! */
240 return jtag_execute_queue();
241 }
242
243
244 static void xscale_getbuf(jtag_callback_data_t arg)
245 {
246 uint8_t *in = (uint8_t *)arg;
247 *((uint32_t *)arg) = buf_get_u32(in, 0, 32);
248 }
249
250 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
251 {
252 if (num_words == 0)
253 return ERROR_INVALID_ARGUMENTS;
254
255 struct xscale_common *xscale = target_to_xscale(target);
256 int retval = ERROR_OK;
257 tap_state_t path[3];
258 struct scan_field fields[3];
259 uint8_t *field0 = malloc(num_words * 1);
260 uint8_t field0_check_value = 0x2;
261 uint8_t field0_check_mask = 0x6;
262 uint32_t *field1 = malloc(num_words * 4);
263 uint8_t field2_check_value = 0x0;
264 uint8_t field2_check_mask = 0x1;
265 int words_done = 0;
266 int words_scheduled = 0;
267 int i;
268
269 path[0] = TAP_DRSELECT;
270 path[1] = TAP_DRCAPTURE;
271 path[2] = TAP_DRSHIFT;
272
273 memset(&fields, 0, sizeof fields);
274
275 fields[0].num_bits = 3;
276 fields[0].check_value = &field0_check_value;
277 fields[0].check_mask = &field0_check_mask;
278
279 fields[1].num_bits = 32;
280
281 fields[2].num_bits = 1;
282 fields[2].check_value = &field2_check_value;
283 fields[2].check_mask = &field2_check_mask;
284
285 xscale_jtag_set_instr(target->tap,
286 XSCALE_DBGTX << xscale->xscale_variant,
287 TAP_IDLE);
288 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
289
290 /* repeat until all words have been collected */
291 int attempts = 0;
292 while (words_done < num_words)
293 {
294 /* schedule reads */
295 words_scheduled = 0;
296 for (i = words_done; i < num_words; i++)
297 {
298 fields[0].in_value = &field0[i];
299
300 jtag_add_pathmove(3, path);
301
302 fields[1].in_value = (uint8_t *)(field1 + i);
303
304 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
305
306 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
307
308 words_scheduled++;
309 }
310
311 if ((retval = jtag_execute_queue()) != ERROR_OK)
312 {
313 LOG_ERROR("JTAG error while receiving data from debug handler");
314 break;
315 }
316
317 /* examine results */
318 for (i = words_done; i < num_words; i++)
319 {
320 if (!(field0[i] & 1))
321 {
322 /* move backwards if necessary */
323 int j;
324 for (j = i; j < num_words - 1; j++)
325 {
326 field0[j] = field0[j + 1];
327 field1[j] = field1[j + 1];
328 }
329 words_scheduled--;
330 }
331 }
332 if (words_scheduled == 0)
333 {
334 if (attempts++ == 1000)
335 {
336 LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
337 retval = ERROR_TARGET_TIMEOUT;
338 break;
339 }
340 }
341
342 words_done += words_scheduled;
343 }
344
345 for (i = 0; i < num_words; i++)
346 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
347
348 free(field0);
349 free(field1);
350 return retval;
351 }
352
353 static int xscale_read_tx(struct target *target, int consume)
354 {
355 struct xscale_common *xscale = target_to_xscale(target);
356 tap_state_t path[3];
357 tap_state_t noconsume_path[6];
358 int retval;
359 struct timeval timeout, now;
360 struct scan_field fields[3];
361 uint8_t field0_in = 0x0;
362 uint8_t field0_check_value = 0x2;
363 uint8_t field0_check_mask = 0x6;
364 uint8_t field2_check_value = 0x0;
365 uint8_t field2_check_mask = 0x1;
366
367 xscale_jtag_set_instr(target->tap,
368 XSCALE_DBGTX << xscale->xscale_variant,
369 TAP_IDLE);
370
371 path[0] = TAP_DRSELECT;
372 path[1] = TAP_DRCAPTURE;
373 path[2] = TAP_DRSHIFT;
374
375 noconsume_path[0] = TAP_DRSELECT;
376 noconsume_path[1] = TAP_DRCAPTURE;
377 noconsume_path[2] = TAP_DREXIT1;
378 noconsume_path[3] = TAP_DRPAUSE;
379 noconsume_path[4] = TAP_DREXIT2;
380 noconsume_path[5] = TAP_DRSHIFT;
381
382 memset(&fields, 0, sizeof fields);
383
384 fields[0].num_bits = 3;
385 fields[0].in_value = &field0_in;
386
387 fields[1].num_bits = 32;
388 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
389
390 fields[2].num_bits = 1;
391 uint8_t tmp;
392 fields[2].in_value = &tmp;
393
394 gettimeofday(&timeout, NULL);
395 timeval_add_time(&timeout, 1, 0);
396
397 for (;;)
398 {
399 /* if we want to consume the register content (i.e. clear TX_READY),
400 * we have to go straight from Capture-DR to Shift-DR
401 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
402 */
403 if (consume)
404 jtag_add_pathmove(3, path);
405 else
406 {
407 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
408 }
409
410 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
411
412 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
413 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
414
415 if ((retval = jtag_execute_queue()) != ERROR_OK)
416 {
417 LOG_ERROR("JTAG error while reading TX");
418 return ERROR_TARGET_TIMEOUT;
419 }
420
421 gettimeofday(&now, NULL);
422 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
423 {
424 LOG_ERROR("time out reading TX register");
425 return ERROR_TARGET_TIMEOUT;
426 }
427 if ((field0_in & 1) || !consume)	/* TX ready, or just peeking */
428 {
429 goto done;
430 }
431 if (debug_level >= 3)
432 {
433 LOG_DEBUG("waiting 100ms");
434 alive_sleep(100); /* avoid flooding the logs */
435 } else
436 {
437 keep_alive();
438 }
439 }
440 done:
441
442 if (!(field0_in & 1))
443 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
444
445 return ERROR_OK;
446 }
447
448 static int xscale_write_rx(struct target *target)
449 {
450 struct xscale_common *xscale = target_to_xscale(target);
451 int retval;
452 struct timeval timeout, now;
453 struct scan_field fields[3];
454 uint8_t field0_out = 0x0;
455 uint8_t field0_in = 0x0;
456 uint8_t field0_check_value = 0x2;
457 uint8_t field0_check_mask = 0x6;
458 uint8_t field2 = 0x0;
459 uint8_t field2_check_value = 0x0;
460 uint8_t field2_check_mask = 0x1;
461
462 xscale_jtag_set_instr(target->tap,
463 XSCALE_DBGRX << xscale->xscale_variant,
464 TAP_IDLE);
465
466 memset(&fields, 0, sizeof fields);
467
468 fields[0].num_bits = 3;
469 fields[0].out_value = &field0_out;
470 fields[0].in_value = &field0_in;
471
472 fields[1].num_bits = 32;
473 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
474
475 fields[2].num_bits = 1;
476 fields[2].out_value = &field2;
477 uint8_t tmp;
478 fields[2].in_value = &tmp;
479
480 gettimeofday(&timeout, NULL);
481 timeval_add_time(&timeout, 1, 0);
482
483 /* poll until rx_read is low */
484 LOG_DEBUG("polling RX");
485 for (;;)
486 {
487 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
488
489 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
490 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
491
492 if ((retval = jtag_execute_queue()) != ERROR_OK)
493 {
494 LOG_ERROR("JTAG error while writing RX");
495 return retval;
496 }
497
498 gettimeofday(&now, NULL);
499 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
500 {
501 LOG_ERROR("time out writing RX register");
502 return ERROR_TARGET_TIMEOUT;
503 }
504 if (!(field0_in & 1))
505 goto done;
506 if (debug_level >= 3)
507 {
508 LOG_DEBUG("waiting 100ms");
509 alive_sleep(100); /* avoid flooding the logs */
510 } else
511 {
512 keep_alive();
513 }
514 }
515 done:
516
517 /* set rx_valid */
518 field2 = 0x1;
519 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
520
521 if ((retval = jtag_execute_queue()) != ERROR_OK)
522 {
523 LOG_ERROR("JTAG error while writing RX");
524 return retval;
525 }
526
527 return ERROR_OK;
528 }
529
530 /* send 'count' elements of 'size' bytes each to the debug handler */
531 static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
532 {
533 struct xscale_common *xscale = target_to_xscale(target);
534 uint32_t t[3];
535 int bits[3];
536 int retval;
537 int done_count = 0;
538
539 xscale_jtag_set_instr(target->tap,
540 XSCALE_DBGRX << xscale->xscale_variant,
541 TAP_IDLE);
542
543 bits[0]=3;
544 t[0]=0;
545 bits[1]=32;
546 t[2]=1;
547 bits[2]=1;
548 int endianness = target->endianness;
549 while (done_count++ < count)
550 {
551 switch (size)
552 {
553 case 4:
554 if (endianness == TARGET_LITTLE_ENDIAN)
555 {
556 t[1]=le_to_h_u32(buffer);
557 } else
558 {
559 t[1]=be_to_h_u32(buffer);
560 }
561 break;
562 case 2:
563 if (endianness == TARGET_LITTLE_ENDIAN)
564 {
565 t[1]=le_to_h_u16(buffer);
566 } else
567 {
568 t[1]=be_to_h_u16(buffer);
569 }
570 break;
571 case 1:
572 t[1]=buffer[0];
573 break;
574 default:
575 LOG_ERROR("BUG: size neither 4, 2 nor 1");
576 return ERROR_INVALID_ARGUMENTS;
577 }
578 jtag_add_dr_out(target->tap,
579 3,
580 bits,
581 t,
582 TAP_IDLE);
583 buffer += size;
584 }
585
586 if ((retval = jtag_execute_queue()) != ERROR_OK)
587 {
588 LOG_ERROR("JTAG error while sending data to debug handler");
589 return retval;
590 }
591
592 return ERROR_OK;
593 }
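/* Note: xscale_send() is the bulk-transfer counterpart of xscale_send_u32()
 * below; xscale_write_memory() uses it to stream the payload after the
 * 0x2n command, base address and word count have been sent.
 */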
594
595 static int xscale_send_u32(struct target *target, uint32_t value)
596 {
597 struct xscale_common *xscale = target_to_xscale(target);
598
599 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
600 return xscale_write_rx(target);
601 }
602
603 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
604 {
605 struct xscale_common *xscale = target_to_xscale(target);
606 int retval;
607 struct scan_field fields[3];
608 uint8_t field0 = 0x0;
609 uint8_t field0_check_value = 0x2;
610 uint8_t field0_check_mask = 0x7;
611 uint8_t field2 = 0x0;
612 uint8_t field2_check_value = 0x0;
613 uint8_t field2_check_mask = 0x1;
614
615 if (hold_rst != -1)
616 xscale->hold_rst = hold_rst;
617
618 if (ext_dbg_brk != -1)
619 xscale->external_debug_break = ext_dbg_brk;
620
621 xscale_jtag_set_instr(target->tap,
622 XSCALE_SELDCSR << xscale->xscale_variant,
623 TAP_IDLE);
624
625 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
626 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
627
628 memset(&fields, 0, sizeof fields);
629
630 fields[0].num_bits = 3;
631 fields[0].out_value = &field0;
632 uint8_t tmp;
633 fields[0].in_value = &tmp;
634
635 fields[1].num_bits = 32;
636 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
637
638 fields[2].num_bits = 1;
639 fields[2].out_value = &field2;
640 uint8_t tmp2;
641 fields[2].in_value = &tmp2;
642
643 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
644
645 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
646 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
647
648 if ((retval = jtag_execute_queue()) != ERROR_OK)
649 {
650 LOG_ERROR("JTAG error while writing DCSR");
651 return retval;
652 }
653
654 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
655 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
656
657 return ERROR_OK;
658 }
659
660 /* parity of a 32-bit word: returns 0 if the number of set bits is even, 1 if odd */
661 static unsigned int parity (unsigned int v)
662 {
663 // unsigned int ov = v;
664 v ^= v >> 16;
665 v ^= v >> 8;
666 v ^= v >> 4;
667 v &= 0xf;
668 // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
669 return (0x6996 >> v) & 1;
670 }
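/* Worked example (illustration only): parity(7) -- three bits set, so the
 * result should be 1.  The shifts fold all 32 bits into the low nibble
 * (7 stays 7), and 0x6996 = 0b0110100110010110 is a 16-entry lookup table
 * of nibble parities, so (0x6996 >> 7) & 1 == 1.
 */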
671
672 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
673 {
674 struct xscale_common *xscale = target_to_xscale(target);
675 uint8_t packet[4];
676 uint8_t cmd;
677 int word;
678 struct scan_field fields[2];
679
680 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
681
682 /* LDIC into IR */
683 xscale_jtag_set_instr(target->tap,
684 XSCALE_LDIC << xscale->xscale_variant,
685 TAP_IDLE);
686
687 /* CMD is b011 to load a cacheline into the Mini ICache.
688 * Loading into the main ICache is deprecated, and unused.
689 * It's followed by three zero bits, and 27 address bits.
690 */
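/* Summary of the resulting LDIC scan sequence (descriptive only, derived
 * from the code below): the first DR scan shifts 33 bits -- a 6-bit field
 * holding CMD b011 plus the three zero bits, then the 27 address bits
 * VA[31:5] (va >> 5).  It is followed by eight DR scans, each carrying one
 * 32-bit instruction word plus its parity bit.
 */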
691 buf_set_u32(&cmd, 0, 6, 0x3);
692
693 /* virtual address of desired cache line */
694 buf_set_u32(packet, 0, 27, va >> 5);
695
696 memset(&fields, 0, sizeof fields);
697
698 fields[0].num_bits = 6;
699 fields[0].out_value = &cmd;
700
701 fields[1].num_bits = 27;
702 fields[1].out_value = packet;
703
704 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
705
706 /* rest of packet is a cacheline: 8 instructions, with parity */
707 fields[0].num_bits = 32;
708 fields[0].out_value = packet;
709
710 fields[1].num_bits = 1;
711 fields[1].out_value = &cmd;
712
713 for (word = 0; word < 8; word++)
714 {
715 buf_set_u32(packet, 0, 32, buffer[word]);
716
717 uint32_t value;
718 memcpy(&value, packet, sizeof(uint32_t));
719 cmd = parity(value);
720
721 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
722 }
723
724 return jtag_execute_queue();
725 }
726
727 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
728 {
729 struct xscale_common *xscale = target_to_xscale(target);
730 uint8_t packet[4];
731 uint8_t cmd;
732 struct scan_field fields[2];
733
734 xscale_jtag_set_instr(target->tap,
735 XSCALE_LDIC << xscale->xscale_variant,
736 TAP_IDLE);
737
738 /* CMD for invalidate IC line b000, bits [6:4] b000 */
739 buf_set_u32(&cmd, 0, 6, 0x0);
740
741 /* virtual address of desired cache line */
742 buf_set_u32(packet, 0, 27, va >> 5);
743
744 memset(&fields, 0, sizeof fields);
745
746 fields[0].num_bits = 6;
747 fields[0].out_value = &cmd;
748
749 fields[1].num_bits = 27;
750 fields[1].out_value = packet;
751
752 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
753
754 return ERROR_OK;
755 }
756
757 static int xscale_update_vectors(struct target *target)
758 {
759 struct xscale_common *xscale = target_to_xscale(target);
760 int i;
761 int retval;
762
763 uint32_t low_reset_branch, high_reset_branch;
764
765 for (i = 1; i < 8; i++)
766 {
767 /* if there's a static vector specified for this exception, override */
768 if (xscale->static_high_vectors_set & (1 << i))
769 {
770 xscale->high_vectors[i] = xscale->static_high_vectors[i];
771 }
772 else
773 {
774 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
775 if (retval == ERROR_TARGET_TIMEOUT)
776 return retval;
777 if (retval != ERROR_OK)
778 {
779 /* Some of these reads will fail as part of normal execution */
780 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
781 }
782 }
783 }
784
785 for (i = 1; i < 8; i++)
786 {
787 if (xscale->static_low_vectors_set & (1 << i))
788 {
789 xscale->low_vectors[i] = xscale->static_low_vectors[i];
790 }
791 else
792 {
793 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
794 if (retval == ERROR_TARGET_TIMEOUT)
795 return retval;
796 if (retval != ERROR_OK)
797 {
798 /* Some of these reads will fail as part of normal execution */
799 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
800 }
801 }
802 }
803
804 /* calculate branches to debug handler */
805 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
806 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
807
808 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
809 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
810
811 /* invalidate and load exception vectors in mini i-cache */
812 xscale_invalidate_ic_line(target, 0x0);
813 xscale_invalidate_ic_line(target, 0xffff0000);
814
815 xscale_load_ic(target, 0x0, xscale->low_vectors);
816 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
817
818 return ERROR_OK;
819 }
820
821 static int xscale_arch_state(struct target *target)
822 {
823 struct xscale_common *xscale = target_to_xscale(target);
824 struct arm *armv4_5 = &xscale->armv4_5_common;
825
826 static const char *state[] =
827 {
828 "disabled", "enabled"
829 };
830
831 static const char *arch_dbg_reason[] =
832 {
833 "", "\n(processor reset)", "\n(trace buffer full)"
834 };
835
836 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
837 {
838 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
839 return ERROR_INVALID_ARGUMENTS;
840 }
841
842 arm_arch_state(target);
843 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
844 state[xscale->armv4_5_mmu.mmu_enabled],
845 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
846 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
847 arch_dbg_reason[xscale->arch_debug_reason]);
848
849 return ERROR_OK;
850 }
851
852 static int xscale_poll(struct target *target)
853 {
854 int retval = ERROR_OK;
855
856 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
857 {
858 enum target_state previous_state = target->state;
859 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
860 {
861
862 /* there's data to read from the tx register, we entered debug state */
863 target->state = TARGET_HALTED;
864
865 /* process debug entry, fetching current mode regs */
866 retval = xscale_debug_entry(target);
867 }
868 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
869 {
870 LOG_USER("error while polling TX register, reset CPU");
871 /* here we "lie" so GDB won't get stuck and a reset can be performed */
872 target->state = TARGET_HALTED;
873 }
874
875 /* debug_entry could have overwritten target state (i.e. immediate resume)
876 * don't signal event handlers in that case
877 */
878 if (target->state != TARGET_HALTED)
879 return ERROR_OK;
880
881 /* if target was running, signal that we halted
882 * otherwise we reentered from debug execution */
883 if (previous_state == TARGET_RUNNING)
884 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
885 else
886 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
887 }
888
889 return retval;
890 }
891
892 static int xscale_debug_entry(struct target *target)
893 {
894 struct xscale_common *xscale = target_to_xscale(target);
895 struct arm *armv4_5 = &xscale->armv4_5_common;
896 uint32_t pc;
897 uint32_t buffer[10];
898 unsigned i;
899 int retval;
900 uint32_t moe;
901
902 /* clear external dbg break (will be written on next DCSR read) */
903 xscale->external_debug_break = 0;
904 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
905 return retval;
906
907 /* get r0, pc, r1 to r7 and cpsr */
908 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
909 return retval;
910
911 /* move r0 from buffer to register cache */
912 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
913 armv4_5->core_cache->reg_list[0].dirty = 1;
914 armv4_5->core_cache->reg_list[0].valid = 1;
915 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
916
917 /* move pc from buffer to register cache */
918 buf_set_u32(armv4_5->pc->value, 0, 32, buffer[1]);
919 armv4_5->pc->dirty = 1;
920 armv4_5->pc->valid = 1;
921 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
922
923 /* move data from buffer to register cache */
924 for (i = 1; i <= 7; i++)
925 {
926 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
927 armv4_5->core_cache->reg_list[i].dirty = 1;
928 armv4_5->core_cache->reg_list[i].valid = 1;
929 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
930 }
931
932 arm_set_cpsr(armv4_5, buffer[9]);
933 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
934
935 if (!is_arm_mode(armv4_5->core_mode))
936 {
937 target->state = TARGET_UNKNOWN;
938 LOG_ERROR("cpsr contains invalid mode value - communication failure");
939 return ERROR_TARGET_FAILURE;
940 }
941 LOG_DEBUG("target entered debug state in %s mode",
942 arm_mode_name(armv4_5->core_mode));
943
944 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
945 if (armv4_5->spsr) {
946 xscale_receive(target, buffer, 8);
947 buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
948 armv4_5->spsr->dirty = false;
949 armv4_5->spsr->valid = true;
950 }
951 else
952 {
953 /* r8 to r14, but no spsr */
954 xscale_receive(target, buffer, 7);
955 }
956
957 /* move data from buffer to right banked register in cache */
958 for (i = 8; i <= 14; i++)
959 {
960 struct reg *r = arm_reg_current(armv4_5, i);
961
962 buf_set_u32(r->value, 0, 32, buffer[i - 8]);
963 r->dirty = false;
964 r->valid = true;
965 }
966
967 /* mark xscale regs invalid to ensure they are retrieved from the
968 * debug handler if requested */
969 for (i = 0; i < xscale->reg_cache->num_regs; i++)
970 xscale->reg_cache->reg_list[i].valid = 0;
971
972 /* examine debug reason */
973 xscale_read_dcsr(target);
974 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
975
976 /* stored PC (for calculating fixup) */
977 pc = buf_get_u32(armv4_5->pc->value, 0, 32);
978
979 switch (moe)
980 {
981 case 0x0: /* Processor reset */
982 target->debug_reason = DBG_REASON_DBGRQ;
983 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
984 pc -= 4;
985 break;
986 case 0x1: /* Instruction breakpoint hit */
987 target->debug_reason = DBG_REASON_BREAKPOINT;
988 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
989 pc -= 4;
990 break;
991 case 0x2: /* Data breakpoint hit */
992 target->debug_reason = DBG_REASON_WATCHPOINT;
993 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
994 pc -= 4;
995 break;
996 case 0x3: /* BKPT instruction executed */
997 target->debug_reason = DBG_REASON_BREAKPOINT;
998 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
999 pc -= 4;
1000 break;
1001 case 0x4: /* Ext. debug event */
1002 target->debug_reason = DBG_REASON_DBGRQ;
1003 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1004 pc -= 4;
1005 break;
1006 case 0x5: /* Vector trap occurred */
1007 target->debug_reason = DBG_REASON_BREAKPOINT;
1008 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1009 pc -= 4;
1010 break;
1011 case 0x6: /* Trace buffer full break */
1012 target->debug_reason = DBG_REASON_DBGRQ;
1013 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1014 pc -= 4;
1015 break;
1016 case 0x7: /* Reserved (may flag Hot-Debug support) */
1017 default:
1018 LOG_ERROR("Method of Entry is 'Reserved'");
1019 exit(-1);
1020 break;
1021 }
1022
1023 /* apply PC fixup */
1024 buf_set_u32(armv4_5->pc->value, 0, 32, pc);
1025
1026 /* on the first debug entry, identify cache type */
1027 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1028 {
1029 uint32_t cache_type_reg;
1030
1031 /* read cp15 cache type register */
1032 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1033 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1034
1035 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1036 }
1037
1038 /* examine MMU and Cache settings */
1039 /* read cp15 control register */
1040 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1041 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1042 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1043 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1044 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1045
1046 /* tracing enabled, read collected trace data */
1047 if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
1048 {
1049 xscale_read_trace(target);
1050
1051 /* Resume if entered debug due to buffer fill and we're still collecting
1052 * trace data. Note that a debug exception due to trace buffer full
1053 * can only happen in fill mode. */
1054 if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1055 {
1056 if (--xscale->trace.fill_counter > 0)
1057 xscale_resume(target, 1, 0x0, 1, 0);
1058 }
1059 else /* entered debug for other reason; reset counter */
1060 xscale->trace.fill_counter = 0;
1061 }
1062
1063 return ERROR_OK;
1064 }
1065
1066 static int xscale_halt(struct target *target)
1067 {
1068 struct xscale_common *xscale = target_to_xscale(target);
1069
1070 LOG_DEBUG("target->state: %s",
1071 target_state_name(target));
1072
1073 if (target->state == TARGET_HALTED)
1074 {
1075 LOG_DEBUG("target was already halted");
1076 return ERROR_OK;
1077 }
1078 else if (target->state == TARGET_UNKNOWN)
1079 {
1080 /* this must not happen for an XScale target */
1081 LOG_ERROR("target was in unknown state when halt was requested");
1082 return ERROR_TARGET_INVALID;
1083 }
1084 else if (target->state == TARGET_RESET)
1085 {
1086 LOG_DEBUG("target->state == TARGET_RESET");
1087 }
1088 else
1089 {
1090 /* assert external dbg break */
1091 xscale->external_debug_break = 1;
1092 xscale_read_dcsr(target);
1093
1094 target->debug_reason = DBG_REASON_DBGRQ;
1095 }
1096
1097 return ERROR_OK;
1098 }
1099
1100 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1101 {
1102 struct xscale_common *xscale = target_to_xscale(target);
1103 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1104 int retval;
1105
1106 if (xscale->ibcr0_used)
1107 {
1108 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1109
1110 if (ibcr0_bp)
1111 {
1112 xscale_unset_breakpoint(target, ibcr0_bp);
1113 }
1114 else
1115 {
1116 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1117 exit(-1);
1118 }
1119 }
1120
1121 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1122 return retval;
1123
1124 return ERROR_OK;
1125 }
1126
1127 static int xscale_disable_single_step(struct target *target)
1128 {
1129 struct xscale_common *xscale = target_to_xscale(target);
1130 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1131 int retval;
1132
1133 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1134 return retval;
1135
1136 return ERROR_OK;
1137 }
1138
1139 static void xscale_enable_watchpoints(struct target *target)
1140 {
1141 struct watchpoint *watchpoint = target->watchpoints;
1142
1143 while (watchpoint)
1144 {
1145 if (watchpoint->set == 0)
1146 xscale_set_watchpoint(target, watchpoint);
1147 watchpoint = watchpoint->next;
1148 }
1149 }
1150
1151 static void xscale_enable_breakpoints(struct target *target)
1152 {
1153 struct breakpoint *breakpoint = target->breakpoints;
1154
1155 /* set any pending breakpoints */
1156 while (breakpoint)
1157 {
1158 if (breakpoint->set == 0)
1159 xscale_set_breakpoint(target, breakpoint);
1160 breakpoint = breakpoint->next;
1161 }
1162 }
1163
1164 static void xscale_free_trace_data(struct xscale_common *xscale)
1165 {
1166 struct xscale_trace_data *td = xscale->trace.data;
1167 while (td)
1168 {
1169 struct xscale_trace_data *next_td = td->next;
1170 if (td->entries)
1171 free(td->entries);
1172 free(td);
1173 td = next_td;
1174 }
1175 xscale->trace.data = NULL;
1176 }
1177
1178 static int xscale_resume(struct target *target, int current,
1179 uint32_t address, int handle_breakpoints, int debug_execution)
1180 {
1181 struct xscale_common *xscale = target_to_xscale(target);
1182 struct arm *armv4_5 = &xscale->armv4_5_common;
1183 uint32_t current_pc;
1184 int retval;
1185 int i;
1186
1187 LOG_DEBUG("-");
1188
1189 if (target->state != TARGET_HALTED)
1190 {
1191 LOG_WARNING("target not halted");
1192 return ERROR_TARGET_NOT_HALTED;
1193 }
1194
1195 if (!debug_execution)
1196 {
1197 target_free_all_working_areas(target);
1198 }
1199
1200 /* update vector tables */
1201 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1202 return retval;
1203
1204 /* current = 1: continue on current pc, otherwise continue at <address> */
1205 if (!current)
1206 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1207
1208 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1209
1210 /* if we're at the reset vector, we have to simulate the branch */
1211 if (current_pc == 0x0)
1212 {
1213 arm_simulate_step(target, NULL);
1214 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1215 }
1216
1217 /* the front-end may request us not to handle breakpoints */
1218 if (handle_breakpoints)
1219 {
1220 struct breakpoint *breakpoint;
1221 breakpoint = breakpoint_find(target,
1222 buf_get_u32(armv4_5->pc->value, 0, 32));
1223 if (breakpoint != NULL)
1224 {
1225 uint32_t next_pc;
1226 enum trace_mode saved_trace_mode;
1227
1228 /* there's a breakpoint at the current PC, we have to step over it */
1229 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1230 xscale_unset_breakpoint(target, breakpoint);
1231
1232 /* calculate PC of next instruction */
1233 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1234 {
1235 uint32_t current_opcode;
1236 target_read_u32(target, current_pc, &current_opcode);
1237 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1238 }
1239
1240 LOG_DEBUG("enable single-step");
1241 xscale_enable_single_step(target, next_pc);
1242
1243 /* restore banked registers */
1244 retval = xscale_restore_banked(target);
1245 if (retval != ERROR_OK)
1246 return retval;
1247
1248 /* send resume request */
1249 xscale_send_u32(target, 0x30);
1250
1251 /* send CPSR */
1252 xscale_send_u32(target,
1253 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1254 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1255 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1256
1257 for (i = 7; i >= 0; i--)
1258 {
1259 /* send register */
1260 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1261 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1262 }
1263
1264 /* send PC */
1265 xscale_send_u32(target,
1266 buf_get_u32(armv4_5->pc->value, 0, 32));
1267 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
1268 buf_get_u32(armv4_5->pc->value, 0, 32));
1269
1270 /* disable trace data collection in xscale_debug_entry() */
1271 saved_trace_mode = xscale->trace.mode;
1272 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1273
1274 /* wait for and process debug entry */
1275 xscale_debug_entry(target);
1276
1277 /* re-enable trace buffer, if enabled previously */
1278 xscale->trace.mode = saved_trace_mode;
1279
1280 LOG_DEBUG("disable single-step");
1281 xscale_disable_single_step(target);
1282
1283 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1284 xscale_set_breakpoint(target, breakpoint);
1285 }
1286 }
1287
1288 /* enable any pending breakpoints and watchpoints */
1289 xscale_enable_breakpoints(target);
1290 xscale_enable_watchpoints(target);
1291
1292 /* restore banked registers */
1293 retval = xscale_restore_banked(target);
1294 if (retval != ERROR_OK)
1295 return retval;
1296
1297 /* send resume request (command 0x30 or 0x31)
1298 * clean the trace buffer if it is to be enabled (0x62) */
1299 if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
1300 {
1301 if (xscale->trace.mode == XSCALE_TRACE_FILL)
1302 {
1303 /* If trace enabled in fill mode and starting collection of new set
1304 * of buffers, initialize buffer counter and free previous buffers */
1305 if (xscale->trace.fill_counter == 0)
1306 {
1307 xscale->trace.fill_counter = xscale->trace.buffer_fill;
1308 xscale_free_trace_data(xscale);
1309 }
1310 }
1311 else /* wrap mode; free previous buffer */
1312 xscale_free_trace_data(xscale);
1313
1314 xscale_send_u32(target, 0x62);
1315 xscale_send_u32(target, 0x31);
1316 }
1317 else
1318 xscale_send_u32(target, 0x30);
1319
1320 /* send CPSR */
1321 xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
1322 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1323 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1324
1325 for (i = 7; i >= 0; i--)
1326 {
1327 /* send register */
1328 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1329 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1330 }
1331
1332 /* send PC */
1333 xscale_send_u32(target, buf_get_u32(armv4_5->pc->value, 0, 32));
1334 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1335 buf_get_u32(armv4_5->pc->value, 0, 32));
1336
1337 target->debug_reason = DBG_REASON_NOTHALTED;
1338
1339 if (!debug_execution)
1340 {
1341 /* registers are now invalid */
1342 register_cache_invalidate(armv4_5->core_cache);
1343 target->state = TARGET_RUNNING;
1344 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1345 }
1346 else
1347 {
1348 target->state = TARGET_DEBUG_RUNNING;
1349 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1350 }
1351
1352 LOG_DEBUG("target resumed");
1353
1354 return ERROR_OK;
1355 }
1356
1357 static int xscale_step_inner(struct target *target, int current,
1358 uint32_t address, int handle_breakpoints)
1359 {
1360 struct xscale_common *xscale = target_to_xscale(target);
1361 struct arm *armv4_5 = &xscale->armv4_5_common;
1362 uint32_t next_pc;
1363 int retval;
1364 int i;
1365
1366 target->debug_reason = DBG_REASON_SINGLESTEP;
1367
1368 /* calculate PC of next instruction */
1369 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1370 {
1371 uint32_t current_opcode, current_pc;
1372 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1373
1374 target_read_u32(target, current_pc, &current_opcode);
1375 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1376 return retval;
1377 }
1378
1379 LOG_DEBUG("enable single-step");
1380 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1381 return retval;
1382
1383 /* restore banked registers */
1384 if ((retval = xscale_restore_banked(target)) != ERROR_OK)
1385 return retval;
1386
1387 /* send resume request (command 0x30 or 0x31)
1388 * clean the trace buffer if it is to be enabled (0x62) */
1389 if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
1390 {
1391 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1392 return retval;
1393 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1394 return retval;
1395 }
1396 else
1397 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1398 return retval;
1399
1400 /* send CPSR */
1401 retval = xscale_send_u32(target,
1402 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1403 if (retval != ERROR_OK)
1404 return retval;
1405 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1406 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1407
1408 for (i = 7; i >= 0; i--)
1409 {
1410 /* send register */
1411 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1412 return retval;
1413 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1414 }
1415
1416 /* send PC */
1417 retval = xscale_send_u32(target,
1418 buf_get_u32(armv4_5->pc->value, 0, 32));
1419 if (retval != ERROR_OK)
1420 return retval;
1421 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1422 buf_get_u32(armv4_5->pc->value, 0, 32));
1423
1424 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1425
1426 /* registers are now invalid */
1427 register_cache_invalidate(armv4_5->core_cache);
1428
1429 /* wait for and process debug entry */
1430 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1431 return retval;
1432
1433 LOG_DEBUG("disable single-step");
1434 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1435 return retval;
1436
1437 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1438
1439 return ERROR_OK;
1440 }
1441
1442 static int xscale_step(struct target *target, int current,
1443 uint32_t address, int handle_breakpoints)
1444 {
1445 struct arm *armv4_5 = target_to_arm(target);
1446 struct breakpoint *breakpoint = NULL;
1447
1448 uint32_t current_pc;
1449 int retval;
1450
1451 if (target->state != TARGET_HALTED)
1452 {
1453 LOG_WARNING("target not halted");
1454 return ERROR_TARGET_NOT_HALTED;
1455 }
1456
1457 /* current = 1: continue on current pc, otherwise continue at <address> */
1458 if (!current)
1459 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1460
1461 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1462
1463 /* if we're at the reset vector, we have to simulate the step */
1464 if (current_pc == 0x0)
1465 {
1466 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1467 return retval;
1468 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1469 LOG_DEBUG("current pc %" PRIx32, current_pc);
1470
1471 target->debug_reason = DBG_REASON_SINGLESTEP;
1472 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1473
1474 return ERROR_OK;
1475 }
1476
1477 /* the front-end may request us not to handle breakpoints */
1478 if (handle_breakpoints)
1479 breakpoint = breakpoint_find(target,
1480 buf_get_u32(armv4_5->pc->value, 0, 32));
1481 if (breakpoint != NULL) {
1482 retval = xscale_unset_breakpoint(target, breakpoint);
1483 if (retval != ERROR_OK)
1484 return retval;
1485 }
1486
1487 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1488 if (retval != ERROR_OK)
1489 return retval;
1490
1491 if (breakpoint)
1492 {
1493 xscale_set_breakpoint(target, breakpoint);
1494 }
1495
1496 LOG_DEBUG("target stepped");
1497
1498 return ERROR_OK;
1499
1500 }
1501
1502 static int xscale_assert_reset(struct target *target)
1503 {
1504 struct xscale_common *xscale = target_to_xscale(target);
1505
1506 LOG_DEBUG("target->state: %s",
1507 target_state_name(target));
1508
1509 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1510 * end up in T-L-R, which would reset JTAG)
1511 */
1512 xscale_jtag_set_instr(target->tap,
1513 XSCALE_SELDCSR << xscale->xscale_variant,
1514 TAP_IDLE);
1515
1516 /* set Hold reset, Halt mode and Trap Reset */
1517 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1518 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1519 xscale_write_dcsr(target, 1, 0);
1520
1521 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1522 xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
1523 jtag_execute_queue();
1524
1525 /* assert reset */
1526 jtag_add_reset(0, 1);
1527
1528 /* sleep 1ms, to be sure we fulfill any requirements */
1529 jtag_add_sleep(1000);
1530 jtag_execute_queue();
1531
1532 target->state = TARGET_RESET;
1533
1534 if (target->reset_halt)
1535 {
1536 int retval;
1537 if ((retval = target_halt(target)) != ERROR_OK)
1538 return retval;
1539 }
1540
1541 return ERROR_OK;
1542 }
1543
1544 static int xscale_deassert_reset(struct target *target)
1545 {
1546 struct xscale_common *xscale = target_to_xscale(target);
1547 struct breakpoint *breakpoint = target->breakpoints;
1548
1549 LOG_DEBUG("-");
1550
1551 xscale->ibcr_available = 2;
1552 xscale->ibcr0_used = 0;
1553 xscale->ibcr1_used = 0;
1554
1555 xscale->dbr_available = 2;
1556 xscale->dbr0_used = 0;
1557 xscale->dbr1_used = 0;
1558
1559 /* mark all hardware breakpoints as unset */
1560 while (breakpoint)
1561 {
1562 if (breakpoint->type == BKPT_HARD)
1563 {
1564 breakpoint->set = 0;
1565 }
1566 breakpoint = breakpoint->next;
1567 }
1568
1569 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1570 xscale_free_trace_data(xscale);
1571
1572 register_cache_invalidate(xscale->armv4_5_common.core_cache);
1573
1574 /* FIXME mark hardware watchpoints as unset too. Also,
1575 * at least some of the XScale registers are invalid...
1576 */
1577
1578 /*
1579 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1580 * contents got invalidated. Safer to force that, so writing new
1581 * contents can't ever fail.
1582 */
1583 {
1584 uint32_t address;
1585 unsigned buf_cnt;
1586 const uint8_t *buffer = xscale_debug_handler;
1587 int retval;
1588
1589 /* release SRST */
1590 jtag_add_reset(0, 0);
1591
1592 /* wait 300ms; 150 and 100ms were not enough */
1593 jtag_add_sleep(300*1000);
1594
1595 jtag_add_runtest(2030, TAP_IDLE);
1596 jtag_execute_queue();
1597
1598 /* set Hold reset, Halt mode and Trap Reset */
1599 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1600 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1601 xscale_write_dcsr(target, 1, 0);
1602
1603 /* Load the debug handler into the mini-icache. Since
1604 * it's using halt mode (not monitor mode), it runs in
1605 * "Special Debug State" for access to registers, memory,
1606 * coprocessors, trace data, etc.
1607 */
1608 address = xscale->handler_address;
1609 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1610 binary_size > 0;
1611 binary_size -= buf_cnt, buffer += buf_cnt)
1612 {
1613 uint32_t cache_line[8];
1614 unsigned i;
1615
1616 buf_cnt = binary_size;
1617 if (buf_cnt > 32)
1618 buf_cnt = 32;
1619
1620 for (i = 0; i < buf_cnt; i += 4)
1621 {
1622 /* convert LE buffer to host-endian uint32_t */
1623 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1624 }
1625
1626 for (; i < 32; i += 4)
1627 {
1628 cache_line[i / 4] = 0xe1a08008;	/* nop padding: mov r8, r8 */
1629 }
1630
1631 /* only load addresses other than the reset vectors */
1632 if ((address % 0x400) != 0x0)
1633 {
1634 retval = xscale_load_ic(target, address,
1635 cache_line);
1636 if (retval != ERROR_OK)
1637 return retval;
1638 }
1639
1640 address += buf_cnt;
1641 }
1642
1643 retval = xscale_load_ic(target, 0x0,
1644 xscale->low_vectors);
1645 if (retval != ERROR_OK)
1646 return retval;
1647 retval = xscale_load_ic(target, 0xffff0000,
1648 xscale->high_vectors);
1649 if (retval != ERROR_OK)
1650 return retval;
1651
1652 jtag_add_runtest(30, TAP_IDLE);
1653
1654 jtag_add_sleep(100000);
1655
1656 /* set Hold reset, Halt mode and Trap Reset */
1657 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1658 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1659 xscale_write_dcsr(target, 1, 0);
1660
1661 /* clear Hold reset to let the target run (should enter debug handler) */
1662 xscale_write_dcsr(target, 0, 1);
1663 target->state = TARGET_RUNNING;
1664
1665 if (!target->reset_halt)
1666 {
1667 jtag_add_sleep(10000);
1668
1669 /* we should have entered debug now */
1670 xscale_debug_entry(target);
1671 target->state = TARGET_HALTED;
1672
1673 /* resume the target */
1674 xscale_resume(target, 1, 0x0, 1, 0);
1675 }
1676 }
1677
1678 return ERROR_OK;
1679 }
1680
1681 static int xscale_read_core_reg(struct target *target, struct reg *r,
1682 int num, enum arm_mode mode)
1683 {
1684 /** \todo add debug handler support for core register reads */
1685 LOG_ERROR("not implemented");
1686 return ERROR_OK;
1687 }
1688
1689 static int xscale_write_core_reg(struct target *target, struct reg *r,
1690 int num, enum arm_mode mode, uint32_t value)
1691 {
1692 /** \todo add debug handler support for core register writes */
1693 LOG_ERROR("not implemented");
1694 return ERROR_OK;
1695 }
1696
1697 static int xscale_full_context(struct target *target)
1698 {
1699 struct arm *armv4_5 = target_to_arm(target);
1700
1701 uint32_t *buffer;
1702
1703 int i, j;
1704
1705 LOG_DEBUG("-");
1706
1707 if (target->state != TARGET_HALTED)
1708 {
1709 LOG_WARNING("target not halted");
1710 return ERROR_TARGET_NOT_HALTED;
1711 }
1712
1713 buffer = malloc(4 * 8);
1714
1715 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1716 * we can't enter User mode on an XScale (unpredictable),
1717 * but User shares registers with SYS
1718 */
1719 for (i = 1; i < 7; i++)
1720 {
1721 enum arm_mode mode = armv4_5_number_to_mode(i);
1722 bool valid = true;
1723 struct reg *r;
1724
1725 if (mode == ARM_MODE_USR)
1726 continue;
1727
1728 /* check if there are invalid registers in the current mode
1729 */
1730 for (j = 0; valid && j <= 16; j++)
1731 {
1732 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1733 mode, j).valid)
1734 valid = false;
1735 }
1736 if (valid)
1737 continue;
1738
1739 /* request banked registers */
1740 xscale_send_u32(target, 0x0);
1741
1742 /* send CPSR for desired bank mode */
1743 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1744
1745 /* get banked registers: r8 to r14; and SPSR
1746 * except in USR/SYS mode
1747 */
1748 if (mode != ARM_MODE_SYS) {
1749 /* SPSR */
1750 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1751 mode, 16);
1752
1753 xscale_receive(target, buffer, 8);
1754
1755 buf_set_u32(r->value, 0, 32, buffer[7]);
1756 r->dirty = false;
1757 r->valid = true;
1758 } else {
1759 xscale_receive(target, buffer, 7);
1760 }
1761
1762 /* move data from buffer to register cache */
1763 for (j = 8; j <= 14; j++)
1764 {
1765 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1766 mode, j);
1767
1768 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1769 r->dirty = false;
1770 r->valid = true;
1771 }
1772 }
1773
1774 free(buffer);
1775
1776 return ERROR_OK;
1777 }
1778
1779 static int xscale_restore_banked(struct target *target)
1780 {
1781 struct arm *armv4_5 = target_to_arm(target);
1782
1783 int i, j;
1784
1785 if (target->state != TARGET_HALTED)
1786 {
1787 LOG_WARNING("target not halted");
1788 return ERROR_TARGET_NOT_HALTED;
1789 }
1790
1791 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1792 * and check if any banked registers need to be written. Ignore
1793 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1794 * an XScale (unpredictable), but they share all registers.
1795 */
1796 for (i = 1; i < 7; i++)
1797 {
1798 enum arm_mode mode = armv4_5_number_to_mode(i);
1799 struct reg *r;
1800
1801 if (mode == ARM_MODE_USR)
1802 continue;
1803
1804 /* check if there are dirty registers in this mode */
1805 for (j = 8; j <= 14; j++)
1806 {
1807 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1808 mode, j).dirty)
1809 goto dirty;
1810 }
1811
1812 /* if not USR/SYS, check if the SPSR needs to be written */
1813 if (mode != ARM_MODE_SYS)
1814 {
1815 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1816 mode, 16).dirty)
1817 goto dirty;
1818 }
1819
1820 /* there's nothing to flush for this mode */
1821 continue;
1822
1823 dirty:
1824 /* command 0x1: "send banked registers" */
1825 xscale_send_u32(target, 0x1);
1826
1827 /* send CPSR for desired mode */
1828 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1829
1830 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1831 * but this protocol doesn't understand that nuance.
1832 */
1833 for (j = 8; j <= 14; j++) {
1834 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1835 mode, j);
1836 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1837 r->dirty = false;
1838 }
1839
1840 /* send spsr if not in USR/SYS mode */
1841 if (mode != ARM_MODE_SYS) {
1842 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1843 mode, 16);
1844 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1845 r->dirty = false;
1846 }
1847 }
1848
1849 return ERROR_OK;
1850 }
1851
1852 static int xscale_read_memory(struct target *target, uint32_t address,
1853 uint32_t size, uint32_t count, uint8_t *buffer)
1854 {
1855 struct xscale_common *xscale = target_to_xscale(target);
1856 uint32_t *buf32;
1857 uint32_t i;
1858 int retval;
1859
1860 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1861
1862 if (target->state != TARGET_HALTED)
1863 {
1864 LOG_WARNING("target not halted");
1865 return ERROR_TARGET_NOT_HALTED;
1866 }
1867
1868 /* sanitize arguments */
1869 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1870 return ERROR_INVALID_ARGUMENTS;
1871
1872 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1873 return ERROR_TARGET_UNALIGNED_ACCESS;
1874
1875 /* send memory read request (command 0x1n, n: access size) */
1876 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1877 return retval;
1878
1879 /* send base address for read request */
1880 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1881 return retval;
1882
1883 /* send number of requested data words */
1884 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1885 return retval;
1886
1887 /* receive data from target (count times 32-bit words in host endianness) */
1888 buf32 = malloc(4 * count);
1889 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1890 { free(buf32); return retval; }	/* don't leak buf32 on a receive error */
1891
1892 /* extract data from host-endian buffer into byte stream */
1893 for (i = 0; i < count; i++)
1894 {
1895 switch (size)
1896 {
1897 case 4:
1898 target_buffer_set_u32(target, buffer, buf32[i]);
1899 buffer += 4;
1900 break;
1901 case 2:
1902 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1903 buffer += 2;
1904 break;
1905 case 1:
1906 *buffer++ = buf32[i] & 0xff;
1907 break;
1908 default:
1909 LOG_ERROR("invalid read size");
1910 return ERROR_INVALID_ARGUMENTS;
1911 }
1912 }
1913
1914 free(buf32);
1915
1916 /* examine DCSR, to see if Sticky Abort (SA) got set */
1917 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1918 return retval;
1919 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1920 {
1921 /* clear SA bit */
1922 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1923 return retval;
1924
1925 return ERROR_TARGET_DATA_ABORT;
1926 }
1927
1928 return ERROR_OK;
1929 }
1930
1931 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1932 uint32_t size, uint32_t count, uint8_t *buffer)
1933 {
1934 struct xscale_common *xscale = target_to_xscale(target);
1935
1936 /* with MMU inactive, there are only physical addresses */
1937 if (!xscale->armv4_5_mmu.mmu_enabled)
1938 return xscale_read_memory(target, address, size, count, buffer);
1939
1940 /** \todo: provide a non-stub implementation of this routine. */
1941 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1942 target_name(target), __func__);
1943 return ERROR_FAIL;
1944 }
1945
1946 static int xscale_write_memory(struct target *target, uint32_t address,
1947 uint32_t size, uint32_t count, const uint8_t *buffer)
1948 {
1949 struct xscale_common *xscale = target_to_xscale(target);
1950 int retval;
1951
1952 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1953
1954 if (target->state != TARGET_HALTED)
1955 {
1956 LOG_WARNING("target not halted");
1957 return ERROR_TARGET_NOT_HALTED;
1958 }
1959
1960 /* sanitize arguments */
1961 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1962 return ERROR_INVALID_ARGUMENTS;
1963
1964 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1965 return ERROR_TARGET_UNALIGNED_ACCESS;
1966
1967 /* send memory write request (command 0x2n, n: access size) */
1968 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1969 return retval;
1970
1971 	/* send base address for write request */
1972 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1973 return retval;
1974
1975 	/* send number of requested data words to be written */
1976 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1977 return retval;
1978
1979 /* extract data from host-endian buffer into byte stream */
1980 #if 0
1981 for (i = 0; i < count; i++)
1982 {
1983 switch (size)
1984 {
1985 case 4:
1986 value = target_buffer_get_u32(target, buffer);
1987 xscale_send_u32(target, value);
1988 buffer += 4;
1989 break;
1990 case 2:
1991 value = target_buffer_get_u16(target, buffer);
1992 xscale_send_u32(target, value);
1993 buffer += 2;
1994 break;
1995 case 1:
1996 value = *buffer;
1997 xscale_send_u32(target, value);
1998 buffer += 1;
1999 break;
2000 default:
2001 LOG_ERROR("should never get here");
2002 exit(-1);
2003 }
2004 }
2005 #endif
2006 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
2007 return retval;
2008
2009 /* examine DCSR, to see if Sticky Abort (SA) got set */
2010 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
2011 return retval;
2012 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2013 {
2014 /* clear SA bit */
2015 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
2016 return retval;
2017
2018 LOG_ERROR("data abort writing memory");
2019 return ERROR_TARGET_DATA_ABORT;
2020 }
2021
2022 return ERROR_OK;
2023 }
2024
2025 static int xscale_write_phys_memory(struct target *target, uint32_t address,
2026 uint32_t size, uint32_t count, const uint8_t *buffer)
2027 {
2028 struct xscale_common *xscale = target_to_xscale(target);
2029
2030 /* with MMU inactive, there are only physical addresses */
2031 if (!xscale->armv4_5_mmu.mmu_enabled)
2032 return xscale_write_memory(target, address, size, count, buffer);
2033
2034 /** \todo: provide a non-stub implementation of this routine. */
2035 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
2036 target_name(target), __func__);
2037 return ERROR_FAIL;
2038 }
2039
2040 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
2041 uint32_t count, const uint8_t *buffer)
2042 {
2043 return xscale_write_memory(target, address, 4, count, buffer);
2044 }
2045
2046 static int xscale_get_ttb(struct target *target, uint32_t *result)
2047 {
2048 struct xscale_common *xscale = target_to_xscale(target);
2049 uint32_t ttb;
2050 int retval;
2051
2052 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2053 if (retval != ERROR_OK)
2054 return retval;
2055 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2056
2057 *result = ttb;
2058
2059 return ERROR_OK;
2060 }
2061
2062 static int xscale_disable_mmu_caches(struct target *target, int mmu,
2063 int d_u_cache, int i_cache)
2064 {
2065 struct xscale_common *xscale = target_to_xscale(target);
2066 uint32_t cp15_control;
2067 int retval;
2068
2069 /* read cp15 control register */
2070 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2071 	if (retval != ERROR_OK)
2072 return retval;
2073 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2074
2075 if (mmu)
2076 cp15_control &= ~0x1U;
2077
2078 if (d_u_cache)
2079 {
2080 /* clean DCache */
2081 retval = xscale_send_u32(target, 0x50);
2082 		if (retval != ERROR_OK)
2083 			return retval;
2084 		retval = xscale_send_u32(target, xscale->cache_clean_address);
2085 		if (retval != ERROR_OK)
2086 			return retval;
2087 
2088 		/* invalidate DCache */
2089 		retval = xscale_send_u32(target, 0x51);
2090 		if (retval != ERROR_OK)
2091 return retval;
2092
2093 cp15_control &= ~0x4U;
2094 }
2095
2096 if (i_cache)
2097 {
2098 /* invalidate ICache */
2099 retval = xscale_send_u32(target, 0x52);
2100 		if (retval != ERROR_OK)
2101 return retval;
2102 cp15_control &= ~0x1000U;
2103 }
2104
2105 /* write new cp15 control register */
2106 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2107 	if (retval != ERROR_OK)
2108 return retval;
2109
2110 /* execute cpwait to ensure outstanding operations complete */
2111 retval = xscale_send_u32(target, 0x53);
2112 return retval;
2113 }
2114
2115 static int xscale_enable_mmu_caches(struct target *target, int mmu,
2116 int d_u_cache, int i_cache)
2117 {
2118 struct xscale_common *xscale = target_to_xscale(target);
2119 uint32_t cp15_control;
2120 int retval;
2121
2122 /* read cp15 control register */
2123 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2124 	if (retval != ERROR_OK)
2125 return retval;
2126 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2127
2128 if (mmu)
2129 cp15_control |= 0x1U;
2130
2131 if (d_u_cache)
2132 cp15_control |= 0x4U;
2133
2134 if (i_cache)
2135 cp15_control |= 0x1000U;
2136
2137 /* write new cp15 control register */
2138 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2139 	if (retval != ERROR_OK)
2140 return retval;
2141
2142 /* execute cpwait to ensure outstanding operations complete */
2143 retval = xscale_send_u32(target, 0x53);
2144 return retval;
2145 }
2146
2147 static int xscale_set_breakpoint(struct target *target,
2148 struct breakpoint *breakpoint)
2149 {
2150 int retval;
2151 struct xscale_common *xscale = target_to_xscale(target);
2152
2153 if (target->state != TARGET_HALTED)
2154 {
2155 LOG_WARNING("target not halted");
2156 return ERROR_TARGET_NOT_HALTED;
2157 }
2158
2159 if (breakpoint->set)
2160 {
2161 LOG_WARNING("breakpoint already set");
2162 return ERROR_OK;
2163 }
2164
2165 if (breakpoint->type == BKPT_HARD)
2166 {
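 		/* IBCR holds the breakpoint address in bits [31:1]; setting bit 0
 		 * enables the comparator. */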
2167 uint32_t value = breakpoint->address | 1;
2168 if (!xscale->ibcr0_used)
2169 {
2170 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2171 xscale->ibcr0_used = 1;
2172 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2173 }
2174 else if (!xscale->ibcr1_used)
2175 {
2176 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2177 xscale->ibcr1_used = 1;
2178 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2179 }
2180 else
2181 { /* bug: availability previously verified in xscale_add_breakpoint() */
2182 LOG_ERROR("BUG: no hardware comparator available");
2183 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2184 }
2185 }
2186 else if (breakpoint->type == BKPT_SOFT)
2187 {
2188 if (breakpoint->length == 4)
2189 {
2190 /* keep the original instruction in target endianness */
2191 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2192 {
2193 return retval;
2194 }
2195 /* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2196 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2197 {
2198 return retval;
2199 }
2200 }
2201 else
2202 {
2203 /* keep the original instruction in target endianness */
2204 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2205 {
2206 return retval;
2207 }
2208 /* write the bkpt instruction in target endianness (arm7_9->arm_bkpt is host endian) */
2209 if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2210 {
2211 return retval;
2212 }
2213 }
2214 breakpoint->set = 1;
2215
2216 xscale_send_u32(target, 0x50); /* clean dcache */
2217 xscale_send_u32(target, xscale->cache_clean_address);
2218 xscale_send_u32(target, 0x51); /* invalidate dcache */
2219 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2220 }
2221
2222 return ERROR_OK;
2223 }
2224
2225 static int xscale_add_breakpoint(struct target *target,
2226 struct breakpoint *breakpoint)
2227 {
2228 struct xscale_common *xscale = target_to_xscale(target);
2229
2230 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2231 {
2232 LOG_ERROR("no breakpoint unit available for hardware breakpoint");
2233 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2234 }
2235
2236 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2237 {
2238 LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2239 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2240 }
2241
2242 if (breakpoint->type == BKPT_HARD)
2243 {
2244 xscale->ibcr_available--;
2245 }
2246
2247 return xscale_set_breakpoint(target, breakpoint);
2248 }
2249
2250 static int xscale_unset_breakpoint(struct target *target,
2251 struct breakpoint *breakpoint)
2252 {
2253 int retval;
2254 struct xscale_common *xscale = target_to_xscale(target);
2255
2256 if (target->state != TARGET_HALTED)
2257 {
2258 LOG_WARNING("target not halted");
2259 return ERROR_TARGET_NOT_HALTED;
2260 }
2261
2262 if (!breakpoint->set)
2263 {
2264 LOG_WARNING("breakpoint not set");
2265 return ERROR_OK;
2266 }
2267
2268 if (breakpoint->type == BKPT_HARD)
2269 {
2270 if (breakpoint->set == 1)
2271 {
2272 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2273 xscale->ibcr0_used = 0;
2274 }
2275 else if (breakpoint->set == 2)
2276 {
2277 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2278 xscale->ibcr1_used = 0;
2279 }
2280 breakpoint->set = 0;
2281 }
2282 else
2283 {
2284 /* restore original instruction (kept in target endianness) */
2285 if (breakpoint->length == 4)
2286 {
2287 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2288 {
2289 return retval;
2290 }
2291 }
2292 else
2293 {
2294 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2295 {
2296 return retval;
2297 }
2298 }
2299 breakpoint->set = 0;
2300
2301 xscale_send_u32(target, 0x50); /* clean dcache */
2302 xscale_send_u32(target, xscale->cache_clean_address);
2303 xscale_send_u32(target, 0x51); /* invalidate dcache */
2304 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2305 }
2306
2307 return ERROR_OK;
2308 }
2309
2310 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2311 {
2312 struct xscale_common *xscale = target_to_xscale(target);
2313
2314 if (target->state != TARGET_HALTED)
2315 {
2316 LOG_ERROR("target not halted");
2317 return ERROR_TARGET_NOT_HALTED;
2318 }
2319
2320 if (breakpoint->set)
2321 {
2322 xscale_unset_breakpoint(target, breakpoint);
2323 }
2324
2325 if (breakpoint->type == BKPT_HARD)
2326 xscale->ibcr_available++;
2327
2328 return ERROR_OK;
2329 }
2330
2331 static int xscale_set_watchpoint(struct target *target,
2332 struct watchpoint *watchpoint)
2333 {
2334 struct xscale_common *xscale = target_to_xscale(target);
2335 uint32_t enable = 0;
2336 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2337 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2338
2339 if (target->state != TARGET_HALTED)
2340 {
2341 LOG_ERROR("target not halted");
2342 return ERROR_TARGET_NOT_HALTED;
2343 }
2344
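 	/* DBCON controls both data breakpoint registers: bits [1:0] enable DBR0
 	 * and bits [3:2] enable DBR1 (0x1 store-only, 0x2 any access, 0x3
 	 * load-only); bit 8 (M) turns DBR1 into an address mask for DBR0. */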
2345 switch (watchpoint->rw)
2346 {
2347 case WPT_READ:
2348 enable = 0x3;
2349 break;
2350 case WPT_ACCESS:
2351 enable = 0x2;
2352 break;
2353 case WPT_WRITE:
2354 enable = 0x1;
2355 break;
2356 		default:
2357 			LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
 			return ERROR_FAIL;
2358 }
2359
2360 /* For watchpoint across more than one word, both DBR registers must
2361 be enlisted, with the second used as a mask. */
2362 if (watchpoint->length > 4)
2363 {
2364 if (xscale->dbr0_used || xscale->dbr1_used)
2365 {
2366 LOG_ERROR("BUG: sufficient hardware comparators unavailable");
2367 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2368 }
2369
2370 /* Write mask value to DBR1, based on the length argument.
2371 * Address bits ignored by the comparator are those set in mask. */
2372 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
2373 watchpoint->length - 1);
2374 xscale->dbr1_used = 1;
2375 enable |= 0x100; /* DBCON[M] */
2376 }
2377
2378 if (!xscale->dbr0_used)
2379 {
2380 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2381 dbcon_value |= enable;
2382 xscale_set_reg_u32(dbcon, dbcon_value);
2383 watchpoint->set = 1;
2384 xscale->dbr0_used = 1;
2385 }
2386 else if (!xscale->dbr1_used)
2387 {
2388 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2389 dbcon_value |= enable << 2;
2390 xscale_set_reg_u32(dbcon, dbcon_value);
2391 watchpoint->set = 2;
2392 xscale->dbr1_used = 1;
2393 }
2394 else
2395 {
2396 LOG_ERROR("BUG: no hardware comparator available");
2397 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2398 }
2399
2400 return ERROR_OK;
2401 }
2402
2403 static int xscale_add_watchpoint(struct target *target,
2404 struct watchpoint *watchpoint)
2405 {
2406 struct xscale_common *xscale = target_to_xscale(target);
2407
2408 if (xscale->dbr_available < 1)
2409 {
2410 LOG_ERROR("no more watchpoint registers available");
2411 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2412 }
2413
2414 if (watchpoint->value)
2415 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2416
2417 	/* check that length is a non-zero power of two */
 	if (watchpoint->length == 0)
 		return ERROR_COMMAND_ARGUMENT_INVALID;
2418 	for (uint32_t len = watchpoint->length; len != 1; len /= 2)
2419 {
2420 if (len % 2)
2421 {
2422 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2423 return ERROR_COMMAND_ARGUMENT_INVALID;
2424 }
2425 }
2426
2427 if (watchpoint->length == 4) /* single word watchpoint */
2428 {
2429 xscale->dbr_available--; /* one DBR reg used */
2430 return ERROR_OK;
2431 }
2432
2433 /* watchpoints across multiple words require both DBR registers */
2434 if (xscale->dbr_available < 2)
2435 {
2436 LOG_ERROR("insufficient watchpoint registers available");
2437 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2438 }
2439
2440 if (watchpoint->length > watchpoint->address)
2441 {
2442 LOG_ERROR("xscale does not support watchpoints with length "
2443 "greater than address");
2444 return ERROR_COMMAND_ARGUMENT_INVALID;
2445 }
2446
2447 xscale->dbr_available = 0;
2448 return ERROR_OK;
2449 }
2450
2451 static int xscale_unset_watchpoint(struct target *target,
2452 struct watchpoint *watchpoint)
2453 {
2454 struct xscale_common *xscale = target_to_xscale(target);
2455 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2456 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2457
2458 if (target->state != TARGET_HALTED)
2459 {
2460 LOG_WARNING("target not halted");
2461 return ERROR_TARGET_NOT_HALTED;
2462 }
2463
2464 if (!watchpoint->set)
2465 {
2466 		LOG_WARNING("watchpoint not set");
2467 return ERROR_OK;
2468 }
2469
2470 if (watchpoint->set == 1)
2471 {
2472 if (watchpoint->length > 4)
2473 {
2474 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2475 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2476 }
2477 else
2478 dbcon_value &= ~0x3;
2479
2480 xscale_set_reg_u32(dbcon, dbcon_value);
2481 xscale->dbr0_used = 0;
2482 }
2483 else if (watchpoint->set == 2)
2484 {
2485 dbcon_value &= ~0xc;
2486 xscale_set_reg_u32(dbcon, dbcon_value);
2487 xscale->dbr1_used = 0;
2488 }
2489 watchpoint->set = 0;
2490
2491 return ERROR_OK;
2492 }
2493
2494 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2495 {
2496 struct xscale_common *xscale = target_to_xscale(target);
2497
2498 if (target->state != TARGET_HALTED)
2499 {
2500 LOG_ERROR("target not halted");
2501 return ERROR_TARGET_NOT_HALTED;
2502 }
2503
2504 if (watchpoint->set)
2505 {
2506 xscale_unset_watchpoint(target, watchpoint);
2507 }
2508
2509 if (watchpoint->length > 4)
2510 xscale->dbr_available++; /* both DBR regs now available */
2511
2512 xscale->dbr_available++;
2513
2514 return ERROR_OK;
2515 }
2516
2517 static int xscale_get_reg(struct reg *reg)
2518 {
2519 struct xscale_reg *arch_info = reg->arch_info;
2520 struct target *target = arch_info->target;
2521 struct xscale_common *xscale = target_to_xscale(target);
2522
2523 /* DCSR, TX and RX are accessible via JTAG */
2524 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2525 {
2526 return xscale_read_dcsr(arch_info->target);
2527 }
2528 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2529 {
2530 /* 1 = consume register content */
2531 return xscale_read_tx(arch_info->target, 1);
2532 }
2533 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2534 {
2535 /* can't read from RX register (host -> debug handler) */
2536 return ERROR_OK;
2537 }
2538 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2539 {
2540 /* can't (explicitly) read from TXRXCTRL register */
2541 return ERROR_OK;
2542 }
2543 	else /* Other DBG registers have to be transferred by the debug handler */
2544 {
2545 /* send CP read request (command 0x40) */
2546 xscale_send_u32(target, 0x40);
2547
2548 /* send CP register number */
2549 xscale_send_u32(target, arch_info->dbg_handler_number);
2550
2551 /* read register value */
2552 xscale_read_tx(target, 1);
2553 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2554
2555 reg->dirty = 0;
2556 reg->valid = 1;
2557 }
2558
2559 return ERROR_OK;
2560 }
2561
2562 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2563 {
2564 struct xscale_reg *arch_info = reg->arch_info;
2565 struct target *target = arch_info->target;
2566 struct xscale_common *xscale = target_to_xscale(target);
2567 uint32_t value = buf_get_u32(buf, 0, 32);
2568
2569 /* DCSR, TX and RX are accessible via JTAG */
2570 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2571 {
2572 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2573 return xscale_write_dcsr(arch_info->target, -1, -1);
2574 }
2575 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2576 {
2577 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2578 return xscale_write_rx(arch_info->target);
2579 }
2580 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2581 {
2582 /* can't write to TX register (debug-handler -> host) */
2583 return ERROR_OK;
2584 }
2585 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2586 {
2587 /* can't (explicitly) write to TXRXCTRL register */
2588 return ERROR_OK;
2589 }
2590 	else /* Other DBG registers have to be transferred by the debug handler */
2591 {
2592 /* send CP write request (command 0x41) */
2593 xscale_send_u32(target, 0x41);
2594
2595 /* send CP register number */
2596 xscale_send_u32(target, arch_info->dbg_handler_number);
2597
2598 /* send CP register value */
2599 xscale_send_u32(target, value);
2600 buf_set_u32(reg->value, 0, 32, value);
2601 }
2602
2603 return ERROR_OK;
2604 }
2605
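 /* Write DCSR by way of the running debug handler (CP write, command 0x41)
  * instead of directly over JTAG, keeping the cached register value in sync. */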
2606 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2607 {
2608 struct xscale_common *xscale = target_to_xscale(target);
2609 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2610 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2611
2612 /* send CP write request (command 0x41) */
2613 xscale_send_u32(target, 0x41);
2614
2615 /* send CP register number */
2616 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2617
2618 /* send CP register value */
2619 xscale_send_u32(target, value);
2620 buf_set_u32(dcsr->value, 0, 32, value);
2621
2622 return ERROR_OK;
2623 }
2624
2625 static int xscale_read_trace(struct target *target)
2626 {
2627 struct xscale_common *xscale = target_to_xscale(target);
2628 struct arm *armv4_5 = &xscale->armv4_5_common;
2629 struct xscale_trace_data **trace_data_p;
2630
2631 /* 258 words from debug handler
2632 * 256 trace buffer entries
2633 * 2 checkpoint addresses
2634 */
2635 uint32_t trace_buffer[258];
2636 int is_address[256];
2637 int i, j;
2638 unsigned int num_checkpoints = 0;
2639
2640 if (target->state != TARGET_HALTED)
2641 {
2642 LOG_WARNING("target must be stopped to read trace data");
2643 return ERROR_TARGET_NOT_HALTED;
2644 }
2645
2646 /* send read trace buffer command (command 0x61) */
2647 xscale_send_u32(target, 0x61);
2648
2649 /* receive trace buffer content */
2650 xscale_receive(target, trace_buffer, 258);
2651
2652 /* parse buffer backwards to identify address entries */
2653 for (i = 255; i >= 0; i--)
2654 {
2655 /* also count number of checkpointed entries */
2656 if ((trace_buffer[i] & 0xe0) == 0xc0)
2657 num_checkpoints++;
2658
2659 is_address[i] = 0;
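 		/* Indirect branch (0x9n) and checkpointed indirect branch (0xdn)
 		 * messages are preceded by up to four entries holding the bytes of
 		 * the branch target address; mark them so they aren't mistaken for
 		 * message bytes. */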
2660 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2661 ((trace_buffer[i] & 0xf0) == 0xd0))
2662 {
2663 if (i > 0)
2664 is_address[--i] = 1;
2665 if (i > 0)
2666 is_address[--i] = 1;
2667 if (i > 0)
2668 is_address[--i] = 1;
2669 if (i > 0)
2670 is_address[--i] = 1;
2671 }
2672 }
2673
2674
2675 /* search first non-zero entry that is not part of an address */
2676 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2677 ;
2678
2679 if (j == 256)
2680 {
2681 LOG_DEBUG("no trace data collected");
2682 return ERROR_XSCALE_NO_TRACE_DATA;
2683 }
2684
2685 /* account for possible partial address at buffer start (wrap mode only) */
2686 if (is_address[0])
2687 { /* first entry is address; complete set of 4? */
2688 i = 1;
2689 while (i < 4)
2690 if (!is_address[i++])
2691 break;
2692 if (i < 4)
2693 j += i; /* partial address; can't use it */
2694 }
2695
2696 /* if first valid entry is indirect branch, can't use that either (no address) */
2697 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2698 j++;
2699
2700 /* walk linked list to terminating entry */
2701 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2702 ;
2703
2704 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2705 (*trace_data_p)->next = NULL;
2706 (*trace_data_p)->chkpt0 = trace_buffer[256];
2707 (*trace_data_p)->chkpt1 = trace_buffer[257];
2708 (*trace_data_p)->last_instruction =
2709 buf_get_u32(armv4_5->pc->value, 0, 32);
2710 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2711 (*trace_data_p)->depth = 256 - j;
2712 (*trace_data_p)->num_checkpoints = num_checkpoints;
2713
2714 for (i = j; i < 256; i++)
2715 {
2716 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2717 if (is_address[i])
2718 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2719 else
2720 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2721 }
2722
2723 return ERROR_OK;
2724 }
2725
2726 static int xscale_read_instruction(struct target *target, uint32_t pc,
2727 struct arm_instruction *instruction)
2728 {
2729 struct xscale_common *const xscale = target_to_xscale(target);
2730 int i;
2731 int section = -1;
2732 size_t size_read;
2733 uint32_t opcode;
2734 int retval;
2735
2736 if (!xscale->trace.image)
2737 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2738
2739 /* search for the section the current instruction belongs to */
2740 for (i = 0; i < xscale->trace.image->num_sections; i++)
2741 {
2742 if ((xscale->trace.image->sections[i].base_address <= pc) &&
2743 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > pc))
2744 {
2745 section = i;
2746 break;
2747 }
2748 }
2749
2750 if (section == -1)
2751 {
2752 /* current instruction couldn't be found in the image */
2753 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2754 }
2755
2756 if (xscale->trace.core_state == ARM_STATE_ARM)
2757 {
2758 uint8_t buf[4];
2759 if ((retval = image_read_section(xscale->trace.image, section,
2760 pc - xscale->trace.image->sections[section].base_address,
2761 4, buf, &size_read)) != ERROR_OK)
2762 {
2763 LOG_ERROR("error while reading instruction");
2764 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2765 }
2766 opcode = target_buffer_get_u32(target, buf);
2767 arm_evaluate_opcode(opcode, pc, instruction);
2768 }
2769 else if (xscale->trace.core_state == ARM_STATE_THUMB)
2770 {
2771 uint8_t buf[2];
2772 if ((retval = image_read_section(xscale->trace.image, section,
2773 pc - xscale->trace.image->sections[section].base_address,
2774 2, buf, &size_read)) != ERROR_OK)
2775 {
2776 LOG_ERROR("error while reading instruction");
2777 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2778 }
2779 opcode = target_buffer_get_u16(target, buf);
2780 thumb_evaluate_opcode(opcode, pc, instruction);
2781 }
2782 else
2783 {
2784 LOG_ERROR("BUG: unknown core state encountered");
2785 exit(-1);
2786 }
2787
2788 return ERROR_OK;
2789 }
2790
2791 /* Extract address encoded into trace data.
2792 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2793 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2794 int i, uint32_t *target)
2795 {
2796 	/* if there are fewer than four entries prior to the indirect branch message
2797 * we can't extract the address */
2798 if (i < 4)
2799 *target = 0;
2800 else
2801 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2802 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2803 }
2804
2805 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2806 struct arm_instruction *instruction,
2807 struct command_context *cmd_ctx)
2808 {
2809 int retval = xscale_read_instruction(target, pc, instruction);
2810 if (retval == ERROR_OK)
2811 command_print(cmd_ctx, "%s", instruction->text);
2812 else
2813 command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2814 }
2815
2816 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2817 {
2818 struct xscale_common *xscale = target_to_xscale(target);
2819 struct xscale_trace_data *trace_data = xscale->trace.data;
2820 int i, retval;
2821 uint32_t breakpoint_pc;
2822 struct arm_instruction instruction;
2823 uint32_t current_pc = 0; /* initialized when address determined */
2824
2825 if (!xscale->trace.image)
2826 LOG_WARNING("No trace image loaded; use 'xscale trace_image'");
2827
2828 /* loop for each trace buffer that was loaded from target */
2829 while (trace_data)
2830 {
2831 int chkpt = 0; /* incremented as checkpointed entries found */
2832 int j;
2833
2834 /* FIXME: set this to correct mode when trace buffer is first enabled */
2835 xscale->trace.core_state = ARM_STATE_ARM;
2836
2837 /* loop for each entry in this trace buffer */
2838 for (i = 0; i < trace_data->depth; i++)
2839 {
2840 int exception = 0;
2841 uint32_t chkpt_reg = 0x0;
2842 uint32_t branch_target = 0;
2843 int count;
2844
2845 /* trace entry type is upper nybble of 'message byte' */
2846 int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;
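 			/* 0..7: exception; 8: direct branch; 9: indirect branch;
 			 * 12/13: checkpointed direct/indirect branch; 15: roll-over.
 			 * The lower nybble is the count of instructions executed
 			 * since the previous message. */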
2847
2848 /* Target addresses of indirect branches are written into buffer
2849 * before the message byte representing the branch. Skip past it */
2850 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2851 continue;
2852
2853 switch (trace_msg_type)
2854 {
2855 case 0: /* Exceptions */
2856 case 1:
2857 case 2:
2858 case 3:
2859 case 4:
2860 case 5:
2861 case 6:
2862 case 7:
2863 exception = (trace_data->entries[i].data & 0x70) >> 4;
2864
2865 /* FIXME: vector table may be at ffff0000 */
2866 branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
2867 break;
2868
2869 case 8: /* Direct Branch */
2870 break;
2871
2872 case 9: /* Indirect Branch */
2873 xscale_branch_address(trace_data, i, &branch_target);
2874 break;
2875
2876 case 13: /* Checkpointed Indirect Branch */
2877 xscale_branch_address(trace_data, i, &branch_target);
2878 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2879 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2880 else
2881 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2882
2883 chkpt++;
2884 break;
2885
2886 case 12: /* Checkpointed Direct Branch */
2887 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2888 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is oldest */
2889 else
2890 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and newest */
2891
2892 /* if no current_pc, checkpoint will be starting point */
2893 if (current_pc == 0)
2894 branch_target = chkpt_reg;
2895
2896 chkpt++;
2897 break;
2898
2899 case 15: /* Roll-over */
2900 break;
2901
2902 default: /* Reserved */
2903 LOG_WARNING("trace is suspect: invalid trace message byte");
2904 continue;
2905
2906 }
2907
2908 /* If we don't have the current_pc yet, but we did get the branch target
2909 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
2910 * then we can start displaying instructions at the next iteration, with
2911 * branch_target as the starting point.
2912 */
2913 if (current_pc == 0)
2914 {
2915 current_pc = branch_target; /* remains 0 unless branch_target obtained */
2916 continue;
2917 }
2918
2919 /* We have current_pc. Read and display the instructions from the image.
2920 * First, display count instructions (lower nybble of message byte). */
2921 count = trace_data->entries[i].data & 0x0f;
2922 for (j = 0; j < count; j++)
2923 {
2924 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2925 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2926 }
2927
2928 /* An additional instruction is implicitly added to count for
2929 * rollover and some exceptions: undef, swi, prefetch abort. */
2930 if ((trace_msg_type == 15) || (exception > 0 && exception < 4))
2931 {
2932 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2933 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2934 }
2935
2936 if (trace_msg_type == 15) /* rollover */
2937 continue;
2938
2939 if (exception)
2940 {
2941 command_print(cmd_ctx, "--- exception %i ---", exception);
2942 continue;
2943 }
2944
2945 /* not exception or rollover; next instruction is a branch and is
2946 * not included in the count */
2947 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2948
2949 /* for direct branches, extract branch destination from instruction */
2950 if ((trace_msg_type == 8) || (trace_msg_type == 12))
2951 {
2952 retval = xscale_read_instruction(target, current_pc, &instruction);
2953 if (retval == ERROR_OK)
2954 current_pc = instruction.info.b_bl_bx_blx.target_address;
2955 else
2956 current_pc = 0; /* branch destination unknown */
2957
2958 /* direct branch w/ checkpoint; can also get from checkpoint reg */
2959 if (trace_msg_type == 12)
2960 {
2961 if (current_pc == 0)
2962 current_pc = chkpt_reg;
2963 else if (current_pc != chkpt_reg) /* sanity check */
2964 LOG_WARNING("trace is suspect: checkpoint register "
2965 						"inconsistent with address from image");
2966 }
2967
2968 if (current_pc == 0)
2969 command_print(cmd_ctx, "address unknown");
2970
2971 continue;
2972 }
2973
2974 /* indirect branch; the branch destination was read from trace buffer */
2975 if ((trace_msg_type == 9) || (trace_msg_type == 13))
2976 {
2977 current_pc = branch_target;
2978
2979 /* sanity check (checkpoint reg is redundant) */
2980 if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
2981 LOG_WARNING("trace is suspect: checkpoint register "
2982 "inconsistent with address from trace buffer");
2983 }
2984
2985 } /* END: for (i = 0; i < trace_data->depth; i++) */
2986
2987 breakpoint_pc = trace_data->last_instruction; /* used below */
2988 trace_data = trace_data->next;
2989
2990 } /* END: while (trace_data) */
2991
2992 /* Finally... display all instructions up to the value of the pc when the
2993 * debug break occurred (saved when trace data was collected from target).
2994 * This is necessary because the trace only records execution branches and 16
2995 	 * consecutive instructions (rollovers), so the last few are typically missed.
2996 */
2997 if (current_pc == 0)
2998 return ERROR_OK; /* current_pc was never found */
2999
3000 /* how many instructions remaining? */
3001 int gap_count = (breakpoint_pc - current_pc) /
3002 (xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);
3003
3004 /* should never be negative or over 16, but verify */
3005 if (gap_count < 0 || gap_count > 16)
3006 {
3007 LOG_WARNING("trace is suspect: excessive gap at end of trace");
3008 return ERROR_OK; /* bail; large number or negative value no good */
3009 }
3010
3011 /* display remaining instructions */
3012 for (i = 0; i < gap_count; i++)
3013 {
3014 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
3015 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
3016 }
3017
3018 return ERROR_OK;
3019 }
3020
3021 static const struct reg_arch_type xscale_reg_type = {
3022 .get = xscale_get_reg,
3023 .set = xscale_set_reg,
3024 };
3025
3026 static void xscale_build_reg_cache(struct target *target)
3027 {
3028 struct xscale_common *xscale = target_to_xscale(target);
3029 struct arm *armv4_5 = &xscale->armv4_5_common;
3030 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
3031 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
3032 int i;
3033 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
3034
3035 (*cache_p) = arm_build_reg_cache(target, armv4_5);
3036
3037 (*cache_p)->next = malloc(sizeof(struct reg_cache));
3038 cache_p = &(*cache_p)->next;
3039
3040 /* fill in values for the xscale reg cache */
3041 (*cache_p)->name = "XScale registers";
3042 (*cache_p)->next = NULL;
3043 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
3044 (*cache_p)->num_regs = num_regs;
3045
3046 for (i = 0; i < num_regs; i++)
3047 {
3048 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
3049 (*cache_p)->reg_list[i].value = calloc(4, 1);
3050 (*cache_p)->reg_list[i].dirty = 0;
3051 (*cache_p)->reg_list[i].valid = 0;
3052 (*cache_p)->reg_list[i].size = 32;
3053 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
3054 (*cache_p)->reg_list[i].type = &xscale_reg_type;
3055 arch_info[i] = xscale_reg_arch_info[i];
3056 arch_info[i].target = target;
3057 }
3058
3059 xscale->reg_cache = (*cache_p);
3060 }
3061
3062 static int xscale_init_target(struct command_context *cmd_ctx,
3063 struct target *target)
3064 {
3065 xscale_build_reg_cache(target);
3066 return ERROR_OK;
3067 }
3068
3069 static int xscale_init_arch_info(struct target *target,
3070 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
3071 {
3072 struct arm *armv4_5;
3073 uint32_t high_reset_branch, low_reset_branch;
3074 int i;
3075
3076 armv4_5 = &xscale->armv4_5_common;
3077
3078 	/* store architecture specific data */
3079 xscale->common_magic = XSCALE_COMMON_MAGIC;
3080
3081 /* we don't really *need* a variant param ... */
3082 if (variant) {
3083 int ir_length = 0;
3084
3085 if (strcmp(variant, "pxa250") == 0
3086 || strcmp(variant, "pxa255") == 0
3087 || strcmp(variant, "pxa26x") == 0)
3088 ir_length = 5;
3089 else if (strcmp(variant, "pxa27x") == 0
3090 || strcmp(variant, "ixp42x") == 0
3091 || strcmp(variant, "ixp45x") == 0
3092 || strcmp(variant, "ixp46x") == 0)
3093 ir_length = 7;
3094 else if (strcmp(variant, "pxa3xx") == 0)
3095 ir_length = 11;
3096 else
3097 LOG_WARNING("%s: unrecognized variant %s",
3098 tap->dotted_name, variant);
3099
3100 if (ir_length && ir_length != tap->ir_length) {
3101 LOG_WARNING("%s: IR length for %s is %d; fixing",
3102 tap->dotted_name, variant, ir_length);
3103 tap->ir_length = ir_length;
3104 }
3105 }
3106
3107 /* PXA3xx shifts the JTAG instructions */
3108 if (tap->ir_length == 11)
3109 xscale->xscale_variant = XSCALE_PXA3XX;
3110 else
3111 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
3112
3113 /* the debug handler isn't installed (and thus not running) at this time */
3114 xscale->handler_address = 0xfe000800;
3115
3116 /* clear the vectors we keep locally for reference */
3117 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3118 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3119
3120 /* no user-specified vectors have been configured yet */
3121 xscale->static_low_vectors_set = 0x0;
3122 xscale->static_high_vectors_set = 0x0;
3123
3124 /* calculate branches to debug handler */
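 	/* The reset vectors at 0x0 (low) and 0xffff0000 (high) each get a branch
 	 * to handler_address + 0x20. ARM B encodes a signed word offset relative
 	 * to PC, which reads as the instruction's address + 8, hence the "- 0x8"
 	 * and the ">> 2". */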
3125 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3126 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
3127
3128 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3129 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3130
3131 for (i = 1; i <= 7; i++)
3132 {
3133 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3134 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3135 }
3136
3137 /* 64kB aligned region used for DCache cleaning */
3138 xscale->cache_clean_address = 0xfffe0000;
3139
3140 xscale->hold_rst = 0;
3141 xscale->external_debug_break = 0;
3142
3143 xscale->ibcr_available = 2;
3144 xscale->ibcr0_used = 0;
3145 xscale->ibcr1_used = 0;
3146
3147 xscale->dbr_available = 2;
3148 xscale->dbr0_used = 0;
3149 xscale->dbr1_used = 0;
3150
3151 LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
3152 target_name(target));
3153
3154 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3155 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3156
3157 xscale->vector_catch = 0x1;
3158
3159 xscale->trace.data = NULL;
3160 xscale->trace.image = NULL;
3161 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3162 xscale->trace.buffer_fill = 0;
3163 xscale->trace.fill_counter = 0;
3164
3165 /* prepare ARMv4/5 specific information */
3166 armv4_5->arch_info = xscale;
3167 armv4_5->read_core_reg = xscale_read_core_reg;
3168 armv4_5->write_core_reg = xscale_write_core_reg;
3169 armv4_5->full_context = xscale_full_context;
3170
3171 arm_init_arch_info(target, armv4_5);
3172
3173 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3174 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3175 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3176 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3177 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3178 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3179 xscale->armv4_5_mmu.has_tiny_pages = 1;
3180 xscale->armv4_5_mmu.mmu_enabled = 0;
3181
3182 return ERROR_OK;
3183 }
3184
3185 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3186 {
3187 struct xscale_common *xscale;
3188
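 	/* the debug handler is downloaded into the 2KB mini-ICache, so it must fit */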
3189 if (sizeof xscale_debug_handler - 1 > 0x800) {
3190 LOG_ERROR("debug_handler.bin: larger than 2kb");
3191 return ERROR_FAIL;
3192 }
3193
3194 xscale = calloc(1, sizeof(*xscale));
3195 if (!xscale)
3196 return ERROR_FAIL;
3197
3198 return xscale_init_arch_info(target, xscale, target->tap,
3199 target->variant);
3200 }
3201
3202 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3203 {
3204 struct target *target = NULL;
3205 struct xscale_common *xscale;
3206 int retval;
3207 uint32_t handler_address;
3208
3209 if (CMD_ARGC < 2)
3210 {
3211 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3212 return ERROR_OK;
3213 }
3214
3215 if ((target = get_target(CMD_ARGV[0])) == NULL)
3216 {
3217 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3218 return ERROR_FAIL;
3219 }
3220
3221 xscale = target_to_xscale(target);
3222 retval = xscale_verify_pointer(CMD_CTX, xscale);
3223 if (retval != ERROR_OK)
3224 return retval;
3225
3226 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3227
3228 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3229 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3230 {
3231 xscale->handler_address = handler_address;
3232 }
3233 else
3234 {
3235 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3236 return ERROR_FAIL;
3237 }
3238
3239 return ERROR_OK;
3240 }
3241
3242 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3243 {
3244 struct target *target = NULL;
3245 struct xscale_common *xscale;
3246 int retval;
3247 uint32_t cache_clean_address;
3248
3249 if (CMD_ARGC < 2)
3250 {
3251 return ERROR_COMMAND_SYNTAX_ERROR;
3252 }
3253
3254 target = get_target(CMD_ARGV[0]);
3255 if (target == NULL)
3256 {
3257 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3258 return ERROR_FAIL;
3259 }
3260 xscale = target_to_xscale(target);
3261 retval = xscale_verify_pointer(CMD_CTX, xscale);
3262 if (retval != ERROR_OK)
3263 return retval;
3264
3265 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3266
3267 if (cache_clean_address & 0xffff)
3268 {
3269 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3270 }
3271 else
3272 {
3273 xscale->cache_clean_address = cache_clean_address;
3274 }
3275
3276 return ERROR_OK;
3277 }
3278
3279 COMMAND_HANDLER(xscale_handle_cache_info_command)
3280 {
3281 struct target *target = get_current_target(CMD_CTX);
3282 struct xscale_common *xscale = target_to_xscale(target);
3283 int retval;
3284
3285 retval = xscale_verify_pointer(CMD_CTX, xscale);
3286 if (retval != ERROR_OK)
3287 return retval;
3288
3289 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3290 }
3291
3292 static int xscale_virt2phys(struct target *target,
3293 uint32_t virtual, uint32_t *physical)
3294 {
3295 struct xscale_common *xscale = target_to_xscale(target);
3296 uint32_t cb;
3297
3298 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3299 LOG_ERROR(xscale_not);
3300 return ERROR_TARGET_INVALID;
3301 }
3302
3303 uint32_t ret;
3304 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
3305 virtual, &cb, &ret);
3306 if (retval != ERROR_OK)
3307 return retval;
3308 *physical = ret;
3309 return ERROR_OK;
3310 }
3311
3312 static int xscale_mmu(struct target *target, int *enabled)
3313 {
3314 struct xscale_common *xscale = target_to_xscale(target);
3315
3316 if (target->state != TARGET_HALTED)
3317 {
3318 LOG_ERROR("Target not halted");
3319 return ERROR_TARGET_INVALID;
3320 }
3321 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3322 return ERROR_OK;
3323 }
3324
3325 COMMAND_HANDLER(xscale_handle_mmu_command)
3326 {
3327 struct target *target = get_current_target(CMD_CTX);
3328 struct xscale_common *xscale = target_to_xscale(target);
3329 int retval;
3330
3331 retval = xscale_verify_pointer(CMD_CTX, xscale);
3332 if (retval != ERROR_OK)
3333 return retval;
3334
3335 if (target->state != TARGET_HALTED)
3336 {
3337 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3338 return ERROR_OK;
3339 }
3340
3341 if (CMD_ARGC >= 1)
3342 {
3343 bool enable;
3344 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3345 if (enable)
3346 xscale_enable_mmu_caches(target, 1, 0, 0);
3347 else
3348 xscale_disable_mmu_caches(target, 1, 0, 0);
3349 xscale->armv4_5_mmu.mmu_enabled = enable;
3350 }
3351
3352 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3353
3354 return ERROR_OK;
3355 }
3356
3357 COMMAND_HANDLER(xscale_handle_idcache_command)
3358 {
3359 struct target *target = get_current_target(CMD_CTX);
3360 struct xscale_common *xscale = target_to_xscale(target);
3361
3362 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3363 if (retval != ERROR_OK)
3364 return retval;
3365
3366 if (target->state != TARGET_HALTED)
3367 {
3368 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3369 return ERROR_OK;
3370 }
3371
3372 bool icache = false;
3373 if (strcmp(CMD_NAME, "icache") == 0)
3374 icache = true;
3375 if (CMD_ARGC >= 1)
3376 {
3377 bool enable;
3378 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3379 if (icache) {
3380 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3381 if (enable)
3382 xscale_enable_mmu_caches(target, 0, 0, 1);
3383 else
3384 xscale_disable_mmu_caches(target, 0, 0, 1);
3385 } else {
3386 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3387 if (enable)
3388 xscale_enable_mmu_caches(target, 0, 1, 0);
3389 else
3390 xscale_disable_mmu_caches(target, 0, 1, 0);
3391 }
3392 }
3393
3394 bool enabled = icache ?
3395 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3396 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3397 const char *msg = enabled ? "enabled" : "disabled";
3398 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3399
3400 return ERROR_OK;
3401 }
3402
3403 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3404 {
3405 struct target *target = get_current_target(CMD_CTX);
3406 struct xscale_common *xscale = target_to_xscale(target);
3407 int retval;
3408
3409 retval = xscale_verify_pointer(CMD_CTX, xscale);
3410 if (retval != ERROR_OK)
3411 return retval;
3412
3413 if (CMD_ARGC < 1)
3414 {
3415 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3416 }
3417 else
3418 {
3419 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3420 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3421 xscale_write_dcsr(target, -1, -1);
3422 }
3423
3424 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3425
3426 return ERROR_OK;
3427 }
3428
3429
3430 COMMAND_HANDLER(xscale_handle_vector_table_command)
3431 {
3432 struct target *target = get_current_target(CMD_CTX);
3433 struct xscale_common *xscale = target_to_xscale(target);
3434 int err = 0;
3435 int retval;
3436
3437 retval = xscale_verify_pointer(CMD_CTX, xscale);
3438 if (retval != ERROR_OK)
3439 return retval;
3440
3441 if (CMD_ARGC == 0) /* print current settings */
3442 {
3443 int idx;
3444
3445 command_print(CMD_CTX, "active user-set static vectors:");
3446 for (idx = 1; idx < 8; idx++)
3447 if (xscale->static_low_vectors_set & (1 << idx))
3448 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3449 for (idx = 1; idx < 8; idx++)
3450 if (xscale->static_high_vectors_set & (1 << idx))
3451 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3452 return ERROR_OK;
3453 }
3454
3455 if (CMD_ARGC != 3)
3456 err = 1;
3457 else
3458 {
3459 int idx;
3460 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3461 uint32_t vec;
3462 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3463
3464 if (idx < 1 || idx >= 8)
3465 err = 1;
3466
3467 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3468 {
3469 xscale->static_low_vectors_set |= (1<<idx);
3470 xscale->static_low_vectors[idx] = vec;
3471 }
3472 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3473 {
3474 xscale->static_high_vectors_set |= (1<<idx);
3475 xscale->static_high_vectors[idx] = vec;
3476 }
3477 else
3478 err = 1;
3479 }
3480
3481 if (err)
3482 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3483
3484 return ERROR_OK;
3485 }
3486
3487
3488 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3489 {
3490 struct target *target = get_current_target(CMD_CTX);
3491 struct xscale_common *xscale = target_to_xscale(target);
3492 uint32_t dcsr_value;
3493 int retval;
3494
3495 retval = xscale_verify_pointer(CMD_CTX, xscale);
3496 if (retval != ERROR_OK)
3497 return retval;
3498
3499 if (target->state != TARGET_HALTED)
3500 {
3501 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3502 return ERROR_OK;
3503 }
3504
3505 if (CMD_ARGC >= 1)
3506 {
3507 if (strcmp("enable", CMD_ARGV[0]) == 0)
3508 xscale->trace.mode = XSCALE_TRACE_WRAP; /* default */
3509 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3510 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3511 else
3512 return ERROR_INVALID_ARGUMENTS;
3513 }
3514
3515 if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED)
3516 {
3517 if (strcmp("fill", CMD_ARGV[1]) == 0)
3518 {
3519 int buffcount = 1; /* default */
3520 if (CMD_ARGC >= 3)
3521 COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
3522 if (buffcount < 1) /* invalid */
3523 {
3524 command_print(CMD_CTX, "fill buffer count must be > 0");
3525 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3526 return ERROR_INVALID_ARGUMENTS;
3527 }
3528 xscale->trace.buffer_fill = buffcount;
3529 xscale->trace.mode = XSCALE_TRACE_FILL;
3530 }
3531 else if (strcmp("wrap", CMD_ARGV[1]) == 0)
3532 xscale->trace.mode = XSCALE_TRACE_WRAP;
3533 else
3534 {
3535 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3536 return ERROR_INVALID_ARGUMENTS;
3537 }
3538 }
3539
3540 if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
3541 {
3542 		char fill_string[24];	/* "fill " plus a full 32-bit decimal value */
3543 		snprintf(fill_string, sizeof(fill_string), "fill %" PRId32, xscale->trace.buffer_fill);
3544 command_print(CMD_CTX, "trace buffer enabled (%s)",
3545 (xscale->trace.mode == XSCALE_TRACE_FILL)
3546 ? fill_string : "wrap");
3547 }
3548 else
3549 command_print(CMD_CTX, "trace buffer disabled");
3550
3551 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3552 if (xscale->trace.mode == XSCALE_TRACE_FILL)
3553 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3554 else
3555 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3556
3557 return ERROR_OK;
3558 }
3559
3560 COMMAND_HANDLER(xscale_handle_trace_image_command)
3561 {
3562 struct target *target = get_current_target(CMD_CTX);
3563 struct xscale_common *xscale = target_to_xscale(target);
3564 int retval;
3565
3566 if (CMD_ARGC < 1)
3567 {
3568 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3569 return ERROR_OK;
3570 }
3571
3572 retval = xscale_verify_pointer(CMD_CTX, xscale);
3573 if (retval != ERROR_OK)
3574 return retval;
3575
3576 if (xscale->trace.image)
3577 {
3578 image_close(xscale->trace.image);
3579 free(xscale->trace.image);
3580 command_print(CMD_CTX, "previously loaded image found and closed");
3581 }
3582
3583 xscale->trace.image = malloc(sizeof(struct image));
3584 xscale->trace.image->base_address_set = 0;
3585 xscale->trace.image->start_address_set = 0;
3586
3587 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3588 if (CMD_ARGC >= 2)
3589 {
3590 xscale->trace.image->base_address_set = 1;
3591 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3592 }
3593 else
3594 {
3595 xscale->trace.image->base_address_set = 0;
3596 }
3597
3598 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3599 {
3600 free(xscale->trace.image);
3601 xscale->trace.image = NULL;
3602 return ERROR_OK;
3603 }
3604
3605 return ERROR_OK;
3606 }
3607
3608 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3609 {
3610 struct target *target = get_current_target(CMD_CTX);
3611 struct xscale_common *xscale = target_to_xscale(target);
3612 struct xscale_trace_data *trace_data;
3613 struct fileio file;
3614 int retval;
3615
3616 retval = xscale_verify_pointer(CMD_CTX, xscale);
3617 if (retval != ERROR_OK)
3618 return retval;
3619
3620 if (target->state != TARGET_HALTED)
3621 {
3622 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3623 return ERROR_OK;
3624 }
3625
3626 if (CMD_ARGC < 1)
3627 {
3628 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3629 return ERROR_OK;
3630 }
3631
3632 trace_data = xscale->trace.data;
3633
3634 if (!trace_data)
3635 {
3636 command_print(CMD_CTX, "no trace data collected");
3637 return ERROR_OK;
3638 }
3639
3640 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3641 {
3642 return ERROR_OK;
3643 }
3644
3645 while (trace_data)
3646 {
3647 int i;
3648
3649 fileio_write_u32(&file, trace_data->chkpt0);
3650 fileio_write_u32(&file, trace_data->chkpt1);
3651 fileio_write_u32(&file, trace_data->last_instruction);
3652 fileio_write_u32(&file, trace_data->depth);
3653
3654 for (i = 0; i < trace_data->depth; i++)
3655 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3656
3657 trace_data = trace_data->next;
3658 }
3659
3660 fileio_close(&file);
3661
3662 return ERROR_OK;
3663 }
3664
3665 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3666 {
3667 struct target *target = get_current_target(CMD_CTX);
3668 struct xscale_common *xscale = target_to_xscale(target);
3669 int retval;
3670
3671 retval = xscale_verify_pointer(CMD_CTX, xscale);
3672 if (retval != ERROR_OK)
3673 return retval;
3674
3675 xscale_analyze_trace(target, CMD_CTX);
3676
3677 return ERROR_OK;
3678 }
3679
3680 COMMAND_HANDLER(xscale_handle_cp15)
3681 {
3682 struct target *target = get_current_target(CMD_CTX);
3683 struct xscale_common *xscale = target_to_xscale(target);
3684 int retval;
3685
3686 retval = xscale_verify_pointer(CMD_CTX, xscale);
3687 if (retval != ERROR_OK)
3688 return retval;
3689
3690 if (target->state != TARGET_HALTED)
3691 {
3692 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3693 return ERROR_OK;
3694 }
3695 uint32_t reg_no = 0;
3696 struct reg *reg = NULL;
3697 if (CMD_ARGC > 0)
3698 {
3699 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3700 /*translate from xscale cp15 register no to openocd register*/
3701 switch (reg_no)
3702 {
3703 case 0:
3704 reg_no = XSCALE_MAINID;
3705 break;
3706 case 1:
3707 reg_no = XSCALE_CTRL;
3708 break;
3709 case 2:
3710 reg_no = XSCALE_TTB;
3711 break;
3712 case 3:
3713 reg_no = XSCALE_DAC;
3714 break;
3715 case 5:
3716 reg_no = XSCALE_FSR;
3717 break;
3718 case 6:
3719 reg_no = XSCALE_FAR;
3720 break;
3721 case 13:
3722 reg_no = XSCALE_PID;
3723 break;
3724 case 15:
3725 reg_no = XSCALE_CPACCESS;
3726 break;
3727 default:
3728 command_print(CMD_CTX, "invalid register number");
3729 return ERROR_INVALID_ARGUMENTS;
3730 }
3731 reg = &xscale->reg_cache->reg_list[reg_no];
3732
3733 }
3734 if (CMD_ARGC == 1)
3735 {
3736 uint32_t value;
3737
3738 /* read cp15 control register */
3739 xscale_get_reg(reg);
3740 value = buf_get_u32(reg->value, 0, 32);
3741 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3742 }
3743 else if (CMD_ARGC == 2)
3744 {
3745 uint32_t value;
3746 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3747
3748 /* send CP write request (command 0x41) */
3749 xscale_send_u32(target, 0x41);
3750
3751 /* send CP register number */
3752 xscale_send_u32(target, reg_no);
3753
3754 /* send CP register value */
3755 xscale_send_u32(target, value);
3756
3757 /* execute cpwait to ensure outstanding operations complete */
3758 xscale_send_u32(target, 0x53);
3759 }
3760 else
3761 {
3762 		command_print(CMD_CTX, "usage: cp15 register [value]");
3763 }
3764
3765 return ERROR_OK;
3766 }
3767
3768 static const struct command_registration xscale_exec_command_handlers[] = {
3769 {
3770 .name = "cache_info",
3771 .handler = xscale_handle_cache_info_command,
3772 .mode = COMMAND_EXEC,
3773 .help = "display information about CPU caches",
3774 },
3775 {
3776 .name = "mmu",
3777 .handler = xscale_handle_mmu_command,
3778 .mode = COMMAND_EXEC,
3779 .help = "enable or disable the MMU",
3780 .usage = "['enable'|'disable']",
3781 },
3782 {
3783 .name = "icache",
3784 .handler = xscale_handle_idcache_command,
3785 .mode = COMMAND_EXEC,
3786 .help = "display ICache state, optionally enabling or "
3787 "disabling it",
3788 .usage = "['enable'|'disable']",
3789 },
3790 {
3791 .name = "dcache",
3792 .handler = xscale_handle_idcache_command,
3793 .mode = COMMAND_EXEC,
3794 .help = "display DCache state, optionally enabling or "
3795 "disabling it",
3796 .usage = "['enable'|'disable']",
3797 },
3798 {
3799 .name = "vector_catch",
3800 .handler = xscale_handle_vector_catch_command,
3801 .mode = COMMAND_EXEC,
3802 .help = "set or display 8-bit mask of vectors "
3803 "that should trigger debug entry",
3804 .usage = "[mask]",
3805 },
3806 {
3807 .name = "vector_table",
3808 .handler = xscale_handle_vector_table_command,
3809 .mode = COMMAND_EXEC,
3810 .help = "set vector table entry in mini-ICache, "
3811 "or display current tables",
3812 .usage = "[('high'|'low') index code]",
3813 },
3814 {
3815 .name = "trace_buffer",
3816 .handler = xscale_handle_trace_buffer_command,
3817 .mode = COMMAND_EXEC,
3818 .help = "display trace buffer status, enable or disable "
3819 "tracing, and optionally reconfigure trace mode",
3820 .usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
3821 },
3822 {
3823 .name = "dump_trace",
3824 .handler = xscale_handle_dump_trace_command,
3825 .mode = COMMAND_EXEC,
3826 .help = "dump content of trace buffer to file",
3827 .usage = "filename",
3828 },
3829 {
3830 .name = "analyze_trace",
3831 .handler = xscale_handle_analyze_trace_buffer_command,
3832 .mode = COMMAND_EXEC,
3833 .help = "analyze content of trace buffer",
3834 .usage = "",
3835 },
3836 {
3837 .name = "trace_image",
3838 .handler = xscale_handle_trace_image_command,
3839 .mode = COMMAND_EXEC,
3840 .help = "load image from file to address (default 0)",
3841 .usage = "filename [offset [filetype]]",
3842 },
3843 {
3844 .name = "cp15",
3845 .handler = xscale_handle_cp15,
3846 .mode = COMMAND_EXEC,
3847 .help = "Read or write coprocessor 15 register.",
3848 .usage = "register [value]",
3849 },
3850 COMMAND_REGISTRATION_DONE
3851 };
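/* These EXEC-mode commands are reached through the "xscale" command group
 * defined below; a session might look like this (arguments follow the
 * .usage strings above, file names and mask values are only examples):
 *
 *   > xscale vector_catch 0x01
 *   > xscale trace_buffer enable fill 2
 *   > xscale dump_trace /tmp/xscale-trace.bin
 *   > xscale analyze_trace
 */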
3852 static const struct command_registration xscale_any_command_handlers[] = {
3853 {
3854 .name = "debug_handler",
3855 .handler = xscale_handle_debug_handler_command,
3856 .mode = COMMAND_ANY,
3857 .help = "Change address used for debug handler.",
3858 .usage = "target address",
3859 },
3860 {
3861 .name = "cache_clean_address",
3862 .handler = xscale_handle_cache_clean_address_command,
3863 .mode = COMMAND_ANY,
3864 .help = "Change address used for cleaning data cache.",
3865 .usage = "address",
3866 },
3867 {
3868 .chain = xscale_exec_command_handlers,
3869 },
3870 COMMAND_REGISTRATION_DONE
3871 };
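/* The COMMAND_ANY entries above may also be issued from a configuration
 * file before the first reset; a sketch with a made-up target name and
 * addresses (consult the board documentation for real values):
 *
 *   xscale debug_handler xscale.cpu 0xfe000800
 *   xscale cache_clean_address 0xfffe0000
 */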
3872 static const struct command_registration xscale_command_handlers[] = {
3873 {
3874 .chain = arm_command_handlers,
3875 },
3876 {
3877 .name = "xscale",
3878 .mode = COMMAND_ANY,
3879 .help = "xscale command group",
3880 .chain = xscale_any_command_handlers,
3881 },
3882 COMMAND_REGISTRATION_DONE
3883 };
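/* Note that the shared ARM command set (arm_command_handlers) is chained in
 * alongside the XScale-specific "xscale" group, so the generic "arm ..."
 * commands remain available on XScale targets.
 */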
3884
3885 struct target_type xscale_target =
3886 {
3887 .name = "xscale",
3888
3889 .poll = xscale_poll,
3890 .arch_state = xscale_arch_state,
3891
3892 .target_request_data = NULL,
3893
3894 .halt = xscale_halt,
3895 .resume = xscale_resume,
3896 .step = xscale_step,
3897
3898 .assert_reset = xscale_assert_reset,
3899 .deassert_reset = xscale_deassert_reset,
3900 .soft_reset_halt = NULL,
3901
3902 /* REVISIT on some cores, allow exporting iwmmxt registers ... */
3903 .get_gdb_reg_list = arm_get_gdb_reg_list,
3904
3905 .read_memory = xscale_read_memory,
3906 .read_phys_memory = xscale_read_phys_memory,
3907 .write_memory = xscale_write_memory,
3908 .write_phys_memory = xscale_write_phys_memory,
3909 .bulk_write_memory = xscale_bulk_write_memory,
3910
3911 .checksum_memory = arm_checksum_memory,
3912 .blank_check_memory = arm_blank_check_memory,
3913
3914 .run_algorithm = armv4_5_run_algorithm,
3915
3916 .add_breakpoint = xscale_add_breakpoint,
3917 .remove_breakpoint = xscale_remove_breakpoint,
3918 .add_watchpoint = xscale_add_watchpoint,
3919 .remove_watchpoint = xscale_remove_watchpoint,
3920
3921 .commands = xscale_command_handlers,
3922 .target_create = xscale_target_create,
3923 .init_target = xscale_init_target,
3924
3925 .virt2phys = xscale_virt2phys,
3926 .mmu = xscale_mmu
3927 };
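/* A minimal sketch of how a board configuration might instantiate this
 * target type (Tcl; TAP name, IR length and endianness are illustrative
 * only and depend on the actual chip):
 *
 *   jtag newtap pxa cpu -irlen 5
 *   target create pxa.cpu xscale -endian little -chain-position pxa.cpu
 */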
