[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
19 ***************************************************************************/
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "replacements.h"
25
26 #include "xscale.h"
27
28 #include "arm7_9_common.h"
29 #include "register.h"
30 #include "target.h"
31 #include "armv4_5.h"
32 #include "arm_simulator.h"
33 #include "arm_disassembler.h"
34 #include "log.h"
35 #include "jtag.h"
36 #include "binarybuffer.h"
37 #include "time_support.h"
38 #include "breakpoints.h"
39 #include "fileio.h"
40
41 #include <stdlib.h>
42 #include <string.h>
43
44 #include <sys/types.h>
45 #include <unistd.h>
46 #include <errno.h>
47
48
49 /* cli handling */
50 int xscale_register_commands(struct command_context_s *cmd_ctx);
51
52 /* forward declarations */
53 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target);
54 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target);
55 int xscale_quit();
56
57 int xscale_arch_state(struct target_s *target);
58 int xscale_poll(target_t *target);
59 int xscale_halt(target_t *target);
60 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution);
61 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints);
62 int xscale_debug_entry(target_t *target);
63 int xscale_restore_context(target_t *target);
64
65 int xscale_assert_reset(target_t *target);
66 int xscale_deassert_reset(target_t *target);
67 int xscale_soft_reset_halt(struct target_s *target);
68
69 int xscale_set_reg_u32(reg_t *reg, u32 value);
70
71 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode);
72 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value);
73
74 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
75 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
76 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer);
77
78 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
79 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
80 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
81 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
82 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
83 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
84 void xscale_enable_watchpoints(struct target_s *target);
85 void xscale_enable_breakpoints(struct target_s *target);
86 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical);
87 static int xscale_mmu(struct target_s *target, int *enabled);
88
89 int xscale_read_trace(target_t *target);
90
91 target_type_t xscale_target =
92 {
93 .name = "xscale",
94
95 .poll = xscale_poll,
96 .arch_state = xscale_arch_state,
97
98 .target_request_data = NULL,
99
100 .halt = xscale_halt,
101 .resume = xscale_resume,
102 .step = xscale_step,
103
104 .assert_reset = xscale_assert_reset,
105 .deassert_reset = xscale_deassert_reset,
106 .soft_reset_halt = xscale_soft_reset_halt,
107
108 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
109
110 .read_memory = xscale_read_memory,
111 .write_memory = xscale_write_memory,
112 .bulk_write_memory = xscale_bulk_write_memory,
113 .checksum_memory = arm7_9_checksum_memory,
114 .blank_check_memory = arm7_9_blank_check_memory,
115
116 .run_algorithm = armv4_5_run_algorithm,
117
118 .add_breakpoint = xscale_add_breakpoint,
119 .remove_breakpoint = xscale_remove_breakpoint,
120 .add_watchpoint = xscale_add_watchpoint,
121 .remove_watchpoint = xscale_remove_watchpoint,
122
123 .register_commands = xscale_register_commands,
124 .target_command = xscale_target_command,
125 .init_target = xscale_init_target,
126 .quit = xscale_quit,
127
128 .virt2phys = xscale_virt2phys,
129 .mmu = xscale_mmu
130 };
131
132 char* xscale_reg_list[] =
133 {
134 "XSCALE_MAINID", /* 0 */
135 "XSCALE_CACHETYPE",
136 "XSCALE_CTRL",
137 "XSCALE_AUXCTRL",
138 "XSCALE_TTB",
139 "XSCALE_DAC",
140 "XSCALE_FSR",
141 "XSCALE_FAR",
142 "XSCALE_PID",
143 "XSCALE_CPACCESS",
144 "XSCALE_IBCR0", /* 10 */
145 "XSCALE_IBCR1",
146 "XSCALE_DBR0",
147 "XSCALE_DBR1",
148 "XSCALE_DBCON",
149 "XSCALE_TBREG",
150 "XSCALE_CHKPT0",
151 "XSCALE_CHKPT1",
152 "XSCALE_DCSR",
153 "XSCALE_TX",
154 "XSCALE_RX", /* 20 */
155 "XSCALE_TXRXCTRL",
156 };
157
158 xscale_reg_t xscale_reg_arch_info[] =
159 {
160 {XSCALE_MAINID, NULL},
161 {XSCALE_CACHETYPE, NULL},
162 {XSCALE_CTRL, NULL},
163 {XSCALE_AUXCTRL, NULL},
164 {XSCALE_TTB, NULL},
165 {XSCALE_DAC, NULL},
166 {XSCALE_FSR, NULL},
167 {XSCALE_FAR, NULL},
168 {XSCALE_PID, NULL},
169 {XSCALE_CPACCESS, NULL},
170 {XSCALE_IBCR0, NULL},
171 {XSCALE_IBCR1, NULL},
172 {XSCALE_DBR0, NULL},
173 {XSCALE_DBR1, NULL},
174 {XSCALE_DBCON, NULL},
175 {XSCALE_TBREG, NULL},
176 {XSCALE_CHKPT0, NULL},
177 {XSCALE_CHKPT1, NULL},
178 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
179 {-1, NULL}, /* TX accessed via JTAG */
180 {-1, NULL}, /* RX accessed via JTAG */
181 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
182 };
183
184 int xscale_reg_arch_type = -1;
185
186 int xscale_get_reg(reg_t *reg);
187 int xscale_set_reg(reg_t *reg, u8 *buf);
188
189 int xscale_get_arch_pointers(target_t *target, armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
190 {
191 armv4_5_common_t *armv4_5 = target->arch_info;
192 xscale_common_t *xscale = armv4_5->arch_info;
193
194 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
195 {
196 LOG_ERROR("target isn't an XScale target");
197 return -1;
198 }
199
200 if (xscale->common_magic != XSCALE_COMMON_MAGIC)
201 {
202 LOG_ERROR("target isn't an XScale target");
203 return -1;
204 }
205
206 *armv4_5_p = armv4_5;
207 *xscale_p = xscale;
208
209 return ERROR_OK;
210 }
211
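/* Queue an IR scan that selects new_instr on the XScale debug TAP, but only
 * if the cached instruction register content differs from new_instr; the
 * captured IR bits are checked against the device's expected/expected_mask
 * values to catch a broken scan chain early.
 */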
212 int xscale_jtag_set_instr(int chain_pos, u32 new_instr)
213 {
214 jtag_device_t *device = jtag_get_device(chain_pos);
215
216 if (buf_get_u32(device->cur_instr, 0, device->ir_length) != new_instr)
217 {
218 scan_field_t field;
219
220 field.device = chain_pos;
221 field.num_bits = device->ir_length;
222 field.out_value = calloc(CEIL(field.num_bits, 8), 1);
223 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
224 field.out_mask = NULL;
225 field.in_value = NULL;
226 jtag_set_check_value(&field, device->expected, device->expected_mask, NULL);
227
228 jtag_add_ir_scan(1, &field, -1);
229
230 free(field.out_value);
231 }
232
233 return ERROR_OK;
234 }
235
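/* Read the Debug Control and Status Register (DCSR) into the register cache.
 * The DR scan is performed twice: the first pass captures the current value,
 * the second pass writes that value straight back, with the hold_rst and
 * external_debug_break bits from the xscale state folded into the 3-bit
 * control field on both passes.
 */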
236 int xscale_read_dcsr(target_t *target)
237 {
238 armv4_5_common_t *armv4_5 = target->arch_info;
239 xscale_common_t *xscale = armv4_5->arch_info;
240
241 int retval;
242
243 scan_field_t fields[3];
244 u8 field0 = 0x0;
245 u8 field0_check_value = 0x2;
246 u8 field0_check_mask = 0x7;
247 u8 field2 = 0x0;
248 u8 field2_check_value = 0x0;
249 u8 field2_check_mask = 0x1;
250
251 jtag_add_end_state(TAP_PD);
252 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
253
254 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
255 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
256
257 fields[0].device = xscale->jtag_info.chain_pos;
258 fields[0].num_bits = 3;
259 fields[0].out_value = &field0;
260 fields[0].out_mask = NULL;
261 fields[0].in_value = NULL;
262 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
263
264 fields[1].device = xscale->jtag_info.chain_pos;
265 fields[1].num_bits = 32;
266 fields[1].out_value = NULL;
267 fields[1].out_mask = NULL;
268 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
269 fields[1].in_handler = NULL;
270 fields[1].in_handler_priv = NULL;
271 fields[1].in_check_value = NULL;
272 fields[1].in_check_mask = NULL;
273
274 fields[2].device = xscale->jtag_info.chain_pos;
275 fields[2].num_bits = 1;
276 fields[2].out_value = &field2;
277 fields[2].out_mask = NULL;
278 fields[2].in_value = NULL;
279 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
280
281 jtag_add_dr_scan(3, fields, -1);
282
283 if ((retval = jtag_execute_queue()) != ERROR_OK)
284 {
285 LOG_ERROR("JTAG error while reading DCSR");
286 return retval;
287 }
288
289 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
290 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
291
292 /* write the register with the value we just read
 293 	 * on this second pass, only the first bit of field0 is guaranteed to be 0
294 */
295 field0_check_mask = 0x1;
296 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
297 fields[1].in_value = NULL;
298
299 jtag_add_end_state(TAP_RTI);
300
301 jtag_add_dr_scan(3, fields, -1);
302
303 /* DANGER!!! this must be here. It will make sure that the arguments
 304 	 * to jtag_set_check_value() do not go out of scope! */
305 return jtag_execute_queue();
306 }
307
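/* Collect num_words 32-bit words from the debug handler through the DBGTX
 * data register.  Every scan returns a 3-bit status field plus a data word;
 * bit 0 of the status flags the word as valid.  Words that were not yet
 * ready are dropped from the result and re-read on the next round, until
 * all words have arrived or roughly 1000 empty rounds have passed.
 */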
308 int xscale_receive(target_t *target, u32 *buffer, int num_words)
309 {
310 if (num_words==0)
311 return ERROR_INVALID_ARGUMENTS;
312
313 int retval=ERROR_OK;
314 armv4_5_common_t *armv4_5 = target->arch_info;
315 xscale_common_t *xscale = armv4_5->arch_info;
316
317 enum tap_state path[3];
318 scan_field_t fields[3];
319
320 u8 *field0 = malloc(num_words * 1);
321 u8 field0_check_value = 0x2;
322 u8 field0_check_mask = 0x6;
323 u32 *field1 = malloc(num_words * 4);
324 u8 field2_check_value = 0x0;
325 u8 field2_check_mask = 0x1;
326 int words_done = 0;
327 int words_scheduled = 0;
328
329 int i;
330
331 path[0] = TAP_SDS;
332 path[1] = TAP_CD;
333 path[2] = TAP_SD;
334
335 fields[0].device = xscale->jtag_info.chain_pos;
336 fields[0].num_bits = 3;
337 fields[0].out_value = NULL;
338 fields[0].out_mask = NULL;
339 fields[0].in_value = NULL;
340 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
341
342 fields[1].device = xscale->jtag_info.chain_pos;
343 fields[1].num_bits = 32;
344 fields[1].out_value = NULL;
345 fields[1].out_mask = NULL;
346 fields[1].in_value = NULL;
347 fields[1].in_handler = NULL;
348 fields[1].in_handler_priv = NULL;
349 fields[1].in_check_value = NULL;
350 fields[1].in_check_mask = NULL;
351
352
353
354 fields[2].device = xscale->jtag_info.chain_pos;
355 fields[2].num_bits = 1;
356 fields[2].out_value = NULL;
357 fields[2].out_mask = NULL;
358 fields[2].in_value = NULL;
359 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
360
361 jtag_add_end_state(TAP_RTI);
362 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
363 jtag_add_runtest(1, -1); /* ensures that we're in the TAP_RTI state as the above could be a no-op */
364
365 /* repeat until all words have been collected */
366 int attempts=0;
367 while (words_done < num_words)
368 {
369 /* schedule reads */
370 words_scheduled = 0;
371 for (i = words_done; i < num_words; i++)
372 {
373 fields[0].in_value = &field0[i];
374 fields[1].in_handler = buf_to_u32_handler;
375 fields[1].in_handler_priv = (u8*)&field1[i];
376
377 jtag_add_pathmove(3, path);
378 jtag_add_dr_scan(3, fields, TAP_RTI);
379 words_scheduled++;
380 }
381
382 if ((retval = jtag_execute_queue()) != ERROR_OK)
383 {
384 LOG_ERROR("JTAG error while receiving data from debug handler");
385 break;
386 }
387
388 /* examine results */
389 for (i = words_done; i < num_words; i++)
390 {
 391 			if (!(field0[i] & 1))
392 {
393 /* move backwards if necessary */
394 int j;
395 for (j = i; j < num_words - 1; j++)
396 {
397 field0[j] = field0[j+1];
398 field1[j] = field1[j+1];
399 }
400 words_scheduled--;
401 }
402 }
403 if (words_scheduled==0)
404 {
405 if (attempts++==1000)
406 {
 407 				LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
408 retval=ERROR_TARGET_TIMEOUT;
409 break;
410 }
411 }
412
413 words_done += words_scheduled;
414 }
415
416 for (i = 0; i < num_words; i++)
417 *(buffer++) = buf_get_u32((u8*)&field1[i], 0, 32);
418
 419 	free(field1);
 	free(field0);
420
421 return retval;
422 }
423
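/* Read the debug handler's TX register.  With consume set, the TAP is walked
 * Select-DR-Scan -> Capture-DR -> Shift-DR, which clears TX_READY; without
 * it, a detour through Exit1-DR/Pause-DR/Exit2-DR leaves the handshake bit
 * untouched so the value can be peeked at.  Gives up with
 * ERROR_TARGET_TIMEOUT after about one second, and returns
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no data was pending.
 */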
424 int xscale_read_tx(target_t *target, int consume)
425 {
426 armv4_5_common_t *armv4_5 = target->arch_info;
427 xscale_common_t *xscale = armv4_5->arch_info;
428 enum tap_state path[3];
429 enum tap_state noconsume_path[6];
430
431 int retval;
432 struct timeval timeout, now;
433
434 scan_field_t fields[3];
435 u8 field0_in = 0x0;
436 u8 field0_check_value = 0x2;
437 u8 field0_check_mask = 0x6;
438 u8 field2_check_value = 0x0;
439 u8 field2_check_mask = 0x1;
440
441 jtag_add_end_state(TAP_RTI);
442
443 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
444
445 path[0] = TAP_SDS;
446 path[1] = TAP_CD;
447 path[2] = TAP_SD;
448
449 noconsume_path[0] = TAP_SDS;
450 noconsume_path[1] = TAP_CD;
451 noconsume_path[2] = TAP_E1D;
452 noconsume_path[3] = TAP_PD;
453 noconsume_path[4] = TAP_E2D;
454 noconsume_path[5] = TAP_SD;
455
456 fields[0].device = xscale->jtag_info.chain_pos;
457 fields[0].num_bits = 3;
458 fields[0].out_value = NULL;
459 fields[0].out_mask = NULL;
460 fields[0].in_value = &field0_in;
461 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
462
463 fields[1].device = xscale->jtag_info.chain_pos;
464 fields[1].num_bits = 32;
465 fields[1].out_value = NULL;
466 fields[1].out_mask = NULL;
467 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
468 fields[1].in_handler = NULL;
469 fields[1].in_handler_priv = NULL;
470 fields[1].in_check_value = NULL;
471 fields[1].in_check_mask = NULL;
472
473
474
475 fields[2].device = xscale->jtag_info.chain_pos;
476 fields[2].num_bits = 1;
477 fields[2].out_value = NULL;
478 fields[2].out_mask = NULL;
479 fields[2].in_value = NULL;
480 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
481
482 gettimeofday(&timeout, NULL);
483 timeval_add_time(&timeout, 1, 0);
484
485 for (;;)
486 {
487 int i;
488 for (i=0; i<100; i++)
489 {
490 /* if we want to consume the register content (i.e. clear TX_READY),
 491 			 * we have to go straight from Capture-DR to Shift-DR;
 492 			 * otherwise, we detour via Exit1-DR and Pause-DR to Shift-DR, leaving TX_READY set
493 */
494 if (consume)
495 jtag_add_pathmove(3, path);
496 else
497 {
498 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
499 }
500
501 jtag_add_dr_scan(3, fields, TAP_RTI);
502
503 if ((retval = jtag_execute_queue()) != ERROR_OK)
504 {
505 LOG_ERROR("JTAG error while reading TX");
506 return ERROR_TARGET_TIMEOUT;
507 }
508
509 gettimeofday(&now, NULL);
510 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
511 {
512 LOG_ERROR("time out reading TX register");
513 return ERROR_TARGET_TIMEOUT;
514 }
515 if (!((!(field0_in & 1)) && consume))
516 {
517 goto done;
518 }
519 }
520 LOG_DEBUG("waiting 10ms");
521 usleep(10*1000); /* avoid flooding the logs */
522 }
523 done:
524
525 if (!(field0_in & 1))
526 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
527
528 return ERROR_OK;
529 }
530
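/* Write the value cached for XSCALE_RX to the debug handler's RX register.
 * The scan is repeated (for up to about one second) until the handshake bit
 * reports that the handler has consumed the previous word, then the word is
 * scanned in once more with the rx_valid flag set to hand it over.
 */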
531 int xscale_write_rx(target_t *target)
532 {
533 armv4_5_common_t *armv4_5 = target->arch_info;
534 xscale_common_t *xscale = armv4_5->arch_info;
535
536 int retval;
537 struct timeval timeout, now;
538
539 scan_field_t fields[3];
540 u8 field0_out = 0x0;
541 u8 field0_in = 0x0;
542 u8 field0_check_value = 0x2;
543 u8 field0_check_mask = 0x6;
544 u8 field2 = 0x0;
545 u8 field2_check_value = 0x0;
546 u8 field2_check_mask = 0x1;
547
548 jtag_add_end_state(TAP_RTI);
549
550 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
551
552 fields[0].device = xscale->jtag_info.chain_pos;
553 fields[0].num_bits = 3;
554 fields[0].out_value = &field0_out;
555 fields[0].out_mask = NULL;
556 fields[0].in_value = &field0_in;
557 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
558
559 fields[1].device = xscale->jtag_info.chain_pos;
560 fields[1].num_bits = 32;
561 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
562 fields[1].out_mask = NULL;
563 fields[1].in_value = NULL;
564 fields[1].in_handler = NULL;
565 fields[1].in_handler_priv = NULL;
566 fields[1].in_check_value = NULL;
567 fields[1].in_check_mask = NULL;
568
569
570
571 fields[2].device = xscale->jtag_info.chain_pos;
572 fields[2].num_bits = 1;
573 fields[2].out_value = &field2;
574 fields[2].out_mask = NULL;
575 fields[2].in_value = NULL;
576 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
577
578 gettimeofday(&timeout, NULL);
579 timeval_add_time(&timeout, 1, 0);
580
581 /* poll until rx_read is low */
582 LOG_DEBUG("polling RX");
583 for (;;)
584 {
585 int i;
586 for (i=0; i<10; i++)
587 {
588 jtag_add_dr_scan(3, fields, TAP_RTI);
589
590 if ((retval = jtag_execute_queue()) != ERROR_OK)
591 {
592 LOG_ERROR("JTAG error while writing RX");
593 return retval;
594 }
595
596 gettimeofday(&now, NULL);
597 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
598 {
599 LOG_ERROR("time out writing RX register");
600 return ERROR_TARGET_TIMEOUT;
601 }
602 if (!(field0_in & 1))
603 goto done;
604 }
605 LOG_DEBUG("waiting 10ms");
606 usleep(10*1000); /* wait 10ms to avoid flooding the logs */
607 }
608 done:
609
610 /* set rx_valid */
611 field2 = 0x1;
612 jtag_add_dr_scan(3, fields, TAP_RTI);
613
614 if ((retval = jtag_execute_queue()) != ERROR_OK)
615 {
616 LOG_ERROR("JTAG error while writing RX");
617 return retval;
618 }
619
620 return ERROR_OK;
621 }
622
 623 /* send count elements of size bytes each to the debug handler */
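/* Note: unlike xscale_write_rx(), this bulk path uses write-only DR scans
 * (jtag_add_dr_out) and does not check the per-word handshake bit; the debug
 * handler is assumed to keep up with the download, presumably trading error
 * checking for speed.
 */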
624 int xscale_send(target_t *target, u8 *buffer, int count, int size)
625 {
626 armv4_5_common_t *armv4_5 = target->arch_info;
627 xscale_common_t *xscale = armv4_5->arch_info;
628 u32 t[3];
629 int bits[3];
630
631 int retval;
632
633 int done_count = 0;
634
635 jtag_add_end_state(TAP_RTI);
636
637 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
638
639 bits[0]=3;
640 t[0]=0;
641 bits[1]=32;
642 t[2]=1;
643 bits[2]=1;
644 int endianness = target->endianness;
645 while (done_count++ < count)
646 {
647 switch (size)
648 {
649 case 4:
650 if (endianness == TARGET_LITTLE_ENDIAN)
651 {
652 t[1]=le_to_h_u32(buffer);
653 } else
654 {
655 t[1]=be_to_h_u32(buffer);
656 }
657 break;
658 case 2:
659 if (endianness == TARGET_LITTLE_ENDIAN)
660 {
661 t[1]=le_to_h_u16(buffer);
662 } else
663 {
664 t[1]=be_to_h_u16(buffer);
665 }
666 break;
667 case 1:
668 t[1]=buffer[0];
669 break;
670 default:
671 LOG_ERROR("BUG: size neither 4, 2 nor 1");
672 exit(-1);
673 }
674 jtag_add_dr_out(xscale->jtag_info.chain_pos,
675 3,
676 bits,
677 t,
678 TAP_RTI);
679 buffer += size;
680 }
681
682 if ((retval = jtag_execute_queue()) != ERROR_OK)
683 {
684 LOG_ERROR("JTAG error while sending data to debug handler");
685 return retval;
686 }
687
688 return ERROR_OK;
689 }
690
691 int xscale_send_u32(target_t *target, u32 value)
692 {
693 armv4_5_common_t *armv4_5 = target->arch_info;
694 xscale_common_t *xscale = armv4_5->arch_info;
695
696 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
697 return xscale_write_rx(target);
698 }
699
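/* Write the cached DCSR value back to the target.  hold_rst and ext_dbg_brk
 * update the corresponding bits of the 3-bit control field; pass -1 to leave
 * either of them unchanged.
 */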
700 int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
701 {
702 armv4_5_common_t *armv4_5 = target->arch_info;
703 xscale_common_t *xscale = armv4_5->arch_info;
704
705 int retval;
706
707 scan_field_t fields[3];
708 u8 field0 = 0x0;
709 u8 field0_check_value = 0x2;
710 u8 field0_check_mask = 0x7;
711 u8 field2 = 0x0;
712 u8 field2_check_value = 0x0;
713 u8 field2_check_mask = 0x1;
714
715 if (hold_rst != -1)
716 xscale->hold_rst = hold_rst;
717
718 if (ext_dbg_brk != -1)
719 xscale->external_debug_break = ext_dbg_brk;
720
721 jtag_add_end_state(TAP_RTI);
722 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
723
724 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
725 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
726
727 fields[0].device = xscale->jtag_info.chain_pos;
728 fields[0].num_bits = 3;
729 fields[0].out_value = &field0;
730 fields[0].out_mask = NULL;
731 fields[0].in_value = NULL;
732 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
733
734 fields[1].device = xscale->jtag_info.chain_pos;
735 fields[1].num_bits = 32;
736 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
737 fields[1].out_mask = NULL;
738 fields[1].in_value = NULL;
739 fields[1].in_handler = NULL;
740 fields[1].in_handler_priv = NULL;
741 fields[1].in_check_value = NULL;
742 fields[1].in_check_mask = NULL;
743
744
745
746 fields[2].device = xscale->jtag_info.chain_pos;
747 fields[2].num_bits = 1;
748 fields[2].out_value = &field2;
749 fields[2].out_mask = NULL;
750 fields[2].in_value = NULL;
751 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
752
753 jtag_add_dr_scan(3, fields, -1);
754
755 if ((retval = jtag_execute_queue()) != ERROR_OK)
756 {
757 LOG_ERROR("JTAG error while writing DCSR");
758 return retval;
759 }
760
761 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
762 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
763
764 return ERROR_OK;
765 }
766
 767 /* parity of the number of set bits: 0 if even, 1 if odd; operates on 32-bit words */
768 unsigned int parity (unsigned int v)
769 {
770 unsigned int ov = v;
771 v ^= v >> 16;
772 v ^= v >> 8;
773 v ^= v >> 4;
774 v &= 0xf;
775 LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
776 return (0x6996 >> v) & 1;
777 }
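/* The 0x6996 constant is a 16-entry parity lookup table: bit n of 0x6996 is
 * the parity of the 4-bit value n.  The three folds above reduce the 32-bit
 * word to a nibble with the same parity, e.g. v = 0x80000001 folds down to
 * 0x9 and (0x6996 >> 9) & 1 == 0, matching its even number of set bits.
 */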
778
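/* Load one 8-word line into the (mini) instruction cache through the LDIC
 * JTAG data register: first a 6-bit command (b011 for the mini IC, b010 for
 * the main IC) together with the 27-bit line address (va >> 5), then eight
 * scans of one 32-bit data word each, accompanied by its parity bit.
 */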
779 int xscale_load_ic(target_t *target, int mini, u32 va, u32 buffer[8])
780 {
781 armv4_5_common_t *armv4_5 = target->arch_info;
782 xscale_common_t *xscale = armv4_5->arch_info;
783 u8 packet[4];
784 u8 cmd;
785 int word;
786
787 scan_field_t fields[2];
788
789 LOG_DEBUG("loading miniIC at 0x%8.8x", va);
790
791 jtag_add_end_state(TAP_RTI);
792 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
793
794 /* CMD is b010 for Main IC and b011 for Mini IC */
795 if (mini)
796 buf_set_u32(&cmd, 0, 3, 0x3);
797 else
798 buf_set_u32(&cmd, 0, 3, 0x2);
799
800 buf_set_u32(&cmd, 3, 3, 0x0);
801
802 /* virtual address of desired cache line */
803 buf_set_u32(packet, 0, 27, va >> 5);
804
805 fields[0].device = xscale->jtag_info.chain_pos;
806 fields[0].num_bits = 6;
807 fields[0].out_value = &cmd;
808 fields[0].out_mask = NULL;
809 fields[0].in_value = NULL;
810 fields[0].in_check_value = NULL;
811 fields[0].in_check_mask = NULL;
812 fields[0].in_handler = NULL;
813 fields[0].in_handler_priv = NULL;
814
815 fields[1].device = xscale->jtag_info.chain_pos;
816 fields[1].num_bits = 27;
817 fields[1].out_value = packet;
818 fields[1].out_mask = NULL;
819 fields[1].in_value = NULL;
820 fields[1].in_check_value = NULL;
821 fields[1].in_check_mask = NULL;
822 fields[1].in_handler = NULL;
823 fields[1].in_handler_priv = NULL;
824
825 jtag_add_dr_scan(2, fields, -1);
826
827 fields[0].num_bits = 32;
828 fields[0].out_value = packet;
829
830 fields[1].num_bits = 1;
831 fields[1].out_value = &cmd;
832
833 for (word = 0; word < 8; word++)
834 {
835 buf_set_u32(packet, 0, 32, buffer[word]);
836 cmd = parity(*((u32*)packet));
837 jtag_add_dr_scan(2, fields, -1);
838 }
839
840 jtag_execute_queue();
841
842 return ERROR_OK;
843 }
844
845 int xscale_invalidate_ic_line(target_t *target, u32 va)
846 {
847 armv4_5_common_t *armv4_5 = target->arch_info;
848 xscale_common_t *xscale = armv4_5->arch_info;
849 u8 packet[4];
850 u8 cmd;
851
852 scan_field_t fields[2];
853
854 jtag_add_end_state(TAP_RTI);
855 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
856
857 /* CMD for invalidate IC line b000, bits [6:4] b000 */
858 buf_set_u32(&cmd, 0, 6, 0x0);
859
860 /* virtual address of desired cache line */
861 buf_set_u32(packet, 0, 27, va >> 5);
862
863 fields[0].device = xscale->jtag_info.chain_pos;
864 fields[0].num_bits = 6;
865 fields[0].out_value = &cmd;
866 fields[0].out_mask = NULL;
867 fields[0].in_value = NULL;
868 fields[0].in_check_value = NULL;
869 fields[0].in_check_mask = NULL;
870 fields[0].in_handler = NULL;
871 fields[0].in_handler_priv = NULL;
872
873 fields[1].device = xscale->jtag_info.chain_pos;
874 fields[1].num_bits = 27;
875 fields[1].out_value = packet;
876 fields[1].out_mask = NULL;
877 fields[1].in_value = NULL;
878 fields[1].in_check_value = NULL;
879 fields[1].in_check_mask = NULL;
880 fields[1].in_handler = NULL;
881 fields[1].in_handler_priv = NULL;
882
883 jtag_add_dr_scan(2, fields, -1);
884
885 return ERROR_OK;
886 }
887
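/* Rebuild the exception vectors used while debugging: entries 1-7 of the low
 * (0x0) and high (0xffff0000) vector tables are taken from the configured
 * static vectors or read back from target memory, entry 0 is pointed at the
 * debug handler, and both tables are then loaded into the mini i-cache.
 */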
888 int xscale_update_vectors(target_t *target)
889 {
890 armv4_5_common_t *armv4_5 = target->arch_info;
891 xscale_common_t *xscale = armv4_5->arch_info;
892 int i;
893 int retval;
894
895 u32 low_reset_branch, high_reset_branch;
896
897 for (i = 1; i < 8; i++)
898 {
899 /* if there's a static vector specified for this exception, override */
900 if (xscale->static_high_vectors_set & (1 << i))
901 {
902 xscale->high_vectors[i] = xscale->static_high_vectors[i];
903 }
904 else
905 {
906 retval=target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
907 if (retval == ERROR_TARGET_TIMEOUT)
908 return retval;
909 if (retval!=ERROR_OK)
910 {
911 /* Some of these reads will fail as part of normal execution */
912 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
913 }
914 }
915 }
916
917 for (i = 1; i < 8; i++)
918 {
919 if (xscale->static_low_vectors_set & (1 << i))
920 {
921 xscale->low_vectors[i] = xscale->static_low_vectors[i];
922 }
923 else
924 {
925 retval=target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
926 if (retval == ERROR_TARGET_TIMEOUT)
927 return retval;
928 if (retval!=ERROR_OK)
929 {
930 /* Some of these reads will fail as part of normal execution */
931 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
932 }
933 }
934 }
935
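	/* An ARM B instruction encodes a signed 24-bit word offset relative to the
	 * instruction address plus 8 (prefetch), hence the
	 * (handler_address + 0x20 - vector_base - 8) >> 2 calculation below for the
	 * reset entries of the low (0x0) and high (0xffff0000) vector tables.
	 */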
936 /* calculate branches to debug handler */
937 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
938 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
939
940 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
941 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
942
943 /* invalidate and load exception vectors in mini i-cache */
944 xscale_invalidate_ic_line(target, 0x0);
945 xscale_invalidate_ic_line(target, 0xffff0000);
946
947 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
948 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
949
950 return ERROR_OK;
951 }
952
953 int xscale_arch_state(struct target_s *target)
954 {
955 armv4_5_common_t *armv4_5 = target->arch_info;
956 xscale_common_t *xscale = armv4_5->arch_info;
957
958 char *state[] =
959 {
960 "disabled", "enabled"
961 };
962
963 char *arch_dbg_reason[] =
964 {
965 "", "\n(processor reset)", "\n(trace buffer full)"
966 };
967
968 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
969 {
970 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
971 exit(-1);
972 }
973
974 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
975 "cpsr: 0x%8.8x pc: 0x%8.8x\n"
976 "MMU: %s, D-Cache: %s, I-Cache: %s"
977 "%s",
978 armv4_5_state_strings[armv4_5->core_state],
979 target_debug_reason_strings[target->debug_reason],
980 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
981 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
982 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
983 state[xscale->armv4_5_mmu.mmu_enabled],
984 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
985 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
986 arch_dbg_reason[xscale->arch_debug_reason]);
987
988 return ERROR_OK;
989 }
990
991 int xscale_poll(target_t *target)
992 {
993 int retval=ERROR_OK;
994 armv4_5_common_t *armv4_5 = target->arch_info;
995 xscale_common_t *xscale = armv4_5->arch_info;
996
997 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
998 {
999 enum target_state previous_state = target->state;
1000 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
1001 {
1002
1003 /* there's data to read from the tx register, we entered debug state */
1004 xscale->handler_running = 1;
1005
1006 target->state = TARGET_HALTED;
1007
1008 /* process debug entry, fetching current mode regs */
1009 retval = xscale_debug_entry(target);
1010 }
1011 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1012 {
1013 LOG_USER("error while polling TX register, reset CPU");
 1014 			/* here we "lie" so GDB won't get stuck and a reset can be performed */
1015 target->state = TARGET_HALTED;
1016 }
1017
1018 /* debug_entry could have overwritten target state (i.e. immediate resume)
1019 * don't signal event handlers in that case
1020 */
1021 if (target->state != TARGET_HALTED)
1022 return ERROR_OK;
1023
1024 /* if target was running, signal that we halted
1025 * otherwise we reentered from debug execution */
1026 if (previous_state == TARGET_RUNNING)
1027 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1028 else
1029 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
1030 }
1031
1032 return retval;
1033 }
1034
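/* Handle entry into debug state: read r0, the pc, r1-r7 and the cpsr from the
 * debug handler, then the banked registers of the interrupted mode, decode
 * the method of entry from DCSR[4:2] and apply the corresponding pc fixup,
 * and finally refresh the cached MMU/cache configuration from cp15.
 */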
1035 int xscale_debug_entry(target_t *target)
1036 {
1037 armv4_5_common_t *armv4_5 = target->arch_info;
1038 xscale_common_t *xscale = armv4_5->arch_info;
1039 u32 pc;
1040 u32 buffer[10];
1041 int i;
1042 int retval;
1043
1044 u32 moe;
1045
1046 /* clear external dbg break (will be written on next DCSR read) */
1047 xscale->external_debug_break = 0;
1048 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
1049 return retval;
1050
1051 /* get r0, pc, r1 to r7 and cpsr */
1052 if ((retval=xscale_receive(target, buffer, 10))!=ERROR_OK)
1053 return retval;
1054
1055 /* move r0 from buffer to register cache */
1056 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
 1057 	armv4_5->core_cache->reg_list[0].dirty = 1;
 1058 	armv4_5->core_cache->reg_list[0].valid = 1;
1059 LOG_DEBUG("r0: 0x%8.8x", buffer[0]);
1060
1061 /* move pc from buffer to register cache */
1062 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
1063 armv4_5->core_cache->reg_list[15].dirty = 1;
1064 armv4_5->core_cache->reg_list[15].valid = 1;
1065 LOG_DEBUG("pc: 0x%8.8x", buffer[1]);
1066
1067 /* move data from buffer to register cache */
1068 for (i = 1; i <= 7; i++)
1069 {
1070 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
1071 armv4_5->core_cache->reg_list[i].dirty = 1;
1072 armv4_5->core_cache->reg_list[i].valid = 1;
1073 LOG_DEBUG("r%i: 0x%8.8x", i, buffer[i + 1]);
1074 }
1075
1076 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
1077 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
1078 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
1079 LOG_DEBUG("cpsr: 0x%8.8x", buffer[9]);
1080
1081 armv4_5->core_mode = buffer[9] & 0x1f;
1082 if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
1083 {
1084 target->state = TARGET_UNKNOWN;
1085 LOG_ERROR("cpsr contains invalid mode value - communication failure");
1086 return ERROR_TARGET_FAILURE;
1087 }
1088 LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
1089
1090 if (buffer[9] & 0x20)
1091 armv4_5->core_state = ARMV4_5_STATE_THUMB;
1092 else
1093 armv4_5->core_state = ARMV4_5_STATE_ARM;
1094
1095
1096 if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
1097 return ERROR_FAIL;
1098
1099 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1100 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
1101 {
1102 xscale_receive(target, buffer, 8);
1103 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1104 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
1105 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
1106 }
1107 else
1108 {
1109 /* r8 to r14, but no spsr */
1110 xscale_receive(target, buffer, 7);
1111 }
1112
1113 /* move data from buffer to register cache */
1114 for (i = 8; i <= 14; i++)
1115 {
1116 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
1117 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
1118 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
1119 }
1120
1121 /* examine debug reason */
1122 xscale_read_dcsr(target);
1123 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
1124
1125 /* stored PC (for calculating fixup) */
1126 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1127
1128 switch (moe)
1129 {
1130 case 0x0: /* Processor reset */
1131 target->debug_reason = DBG_REASON_DBGRQ;
1132 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1133 pc -= 4;
1134 break;
1135 case 0x1: /* Instruction breakpoint hit */
1136 target->debug_reason = DBG_REASON_BREAKPOINT;
1137 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1138 pc -= 4;
1139 break;
1140 case 0x2: /* Data breakpoint hit */
1141 target->debug_reason = DBG_REASON_WATCHPOINT;
1142 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1143 pc -= 4;
1144 break;
1145 case 0x3: /* BKPT instruction executed */
1146 target->debug_reason = DBG_REASON_BREAKPOINT;
1147 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1148 pc -= 4;
1149 break;
1150 case 0x4: /* Ext. debug event */
1151 target->debug_reason = DBG_REASON_DBGRQ;
1152 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1153 pc -= 4;
1154 break;
 1155 		case 0x5: /* Vector trap occurred */
1156 target->debug_reason = DBG_REASON_BREAKPOINT;
1157 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1158 pc -= 4;
1159 break;
1160 case 0x6: /* Trace buffer full break */
1161 target->debug_reason = DBG_REASON_DBGRQ;
1162 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1163 pc -= 4;
1164 break;
1165 case 0x7: /* Reserved */
1166 default:
1167 LOG_ERROR("Method of Entry is 'Reserved'");
1168 exit(-1);
1169 break;
1170 }
1171
1172 /* apply PC fixup */
1173 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1174
1175 /* on the first debug entry, identify cache type */
1176 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1177 {
1178 u32 cache_type_reg;
1179
1180 /* read cp15 cache type register */
1181 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1182 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1183
1184 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1185 }
1186
1187 /* examine MMU and Cache settings */
1188 /* read cp15 control register */
1189 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1190 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1191 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1192 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1193 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1194
1195 /* tracing enabled, read collected trace data */
1196 if (xscale->trace.buffer_enabled)
1197 {
1198 xscale_read_trace(target);
1199 xscale->trace.buffer_fill--;
1200
1201 /* resume if we're still collecting trace data */
1202 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1203 && (xscale->trace.buffer_fill > 0))
1204 {
1205 xscale_resume(target, 1, 0x0, 1, 0);
1206 }
1207 else
1208 {
1209 xscale->trace.buffer_enabled = 0;
1210 }
1211 }
1212
1213 return ERROR_OK;
1214 }
1215
1216 int xscale_halt(target_t *target)
1217 {
1218 armv4_5_common_t *armv4_5 = target->arch_info;
1219 xscale_common_t *xscale = armv4_5->arch_info;
1220
1221 LOG_DEBUG("target->state: %s", target_state_strings[target->state]);
1222
1223 if (target->state == TARGET_HALTED)
1224 {
1225 LOG_DEBUG("target was already halted");
1226 return ERROR_OK;
1227 }
1228 else if (target->state == TARGET_UNKNOWN)
1229 {
 1230 		/* this must not happen for an XScale target */
1231 LOG_ERROR("target was in unknown state when halt was requested");
1232 return ERROR_TARGET_INVALID;
1233 }
1234 else if (target->state == TARGET_RESET)
1235 {
1236 LOG_DEBUG("target->state == TARGET_RESET");
1237 }
1238 else
1239 {
1240 /* assert external dbg break */
1241 xscale->external_debug_break = 1;
1242 xscale_read_dcsr(target);
1243
1244 target->debug_reason = DBG_REASON_DBGRQ;
1245 }
1246
1247 return ERROR_OK;
1248 }
1249
1250 int xscale_enable_single_step(struct target_s *target, u32 next_pc)
1251 {
1252 armv4_5_common_t *armv4_5 = target->arch_info;
1253 xscale_common_t *xscale= armv4_5->arch_info;
1254 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1255
1256 if (xscale->ibcr0_used)
1257 {
1258 breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1259
1260 if (ibcr0_bp)
1261 {
1262 xscale_unset_breakpoint(target, ibcr0_bp);
1263 }
1264 else
1265 {
1266 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1267 exit(-1);
1268 }
1269 }
1270
1271 xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1272
1273 return ERROR_OK;
1274 }
1275
1276 int xscale_disable_single_step(struct target_s *target)
1277 {
1278 armv4_5_common_t *armv4_5 = target->arch_info;
1279 xscale_common_t *xscale= armv4_5->arch_info;
1280 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1281
1282 xscale_set_reg_u32(ibcr0, 0x0);
1283
1284 return ERROR_OK;
1285 }
1286
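/* Resume execution.  If a breakpoint sits on the resume address it is removed,
 * stepped over with IBCR0 and re-armed first.  The resume itself is a fixed
 * download to the debug handler: command 0x30 (or 0x62 followed by 0x31 when
 * trace collection is active), then the CPSR, r7 down to r0, and the PC.
 */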
1287 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution)
1288 {
1289 armv4_5_common_t *armv4_5 = target->arch_info;
1290 xscale_common_t *xscale= armv4_5->arch_info;
1291 breakpoint_t *breakpoint = target->breakpoints;
1292
1293 u32 current_pc;
1294
1295 int retval;
1296 int i;
1297
1298 LOG_DEBUG("-");
1299
1300 if (target->state != TARGET_HALTED)
1301 {
1302 LOG_WARNING("target not halted");
1303 return ERROR_TARGET_NOT_HALTED;
1304 }
1305
1306 if (!debug_execution)
1307 {
1308 target_free_all_working_areas(target);
1309 }
1310
1311 /* update vector tables */
1312 if ((retval=xscale_update_vectors(target))!=ERROR_OK)
1313 return retval;
1314
1315 /* current = 1: continue on current pc, otherwise continue at <address> */
1316 if (!current)
1317 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1318
1319 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1320
1321 /* if we're at the reset vector, we have to simulate the branch */
1322 if (current_pc == 0x0)
1323 {
1324 arm_simulate_step(target, NULL);
1325 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1326 }
1327
1328 /* the front-end may request us not to handle breakpoints */
1329 if (handle_breakpoints)
1330 {
1331 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1332 {
1333 u32 next_pc;
1334
1335 /* there's a breakpoint at the current PC, we have to step over it */
1336 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1337 xscale_unset_breakpoint(target, breakpoint);
1338
1339 /* calculate PC of next instruction */
1340 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1341 {
1342 u32 current_opcode;
1343 target_read_u32(target, current_pc, &current_opcode);
1344 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1345 }
1346
1347 LOG_DEBUG("enable single-step");
1348 xscale_enable_single_step(target, next_pc);
1349
1350 /* restore banked registers */
1351 xscale_restore_context(target);
1352
1353 /* send resume request (command 0x30 or 0x31)
1354 * clean the trace buffer if it is to be enabled (0x62) */
1355 if (xscale->trace.buffer_enabled)
1356 {
1357 xscale_send_u32(target, 0x62);
1358 xscale_send_u32(target, 0x31);
1359 }
1360 else
1361 xscale_send_u32(target, 0x30);
1362
1363 /* send CPSR */
1364 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1365 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1366
1367 for (i = 7; i >= 0; i--)
1368 {
1369 /* send register */
1370 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1371 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1372 }
1373
1374 /* send PC */
1375 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1376 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1377
1378 /* wait for and process debug entry */
1379 xscale_debug_entry(target);
1380
1381 LOG_DEBUG("disable single-step");
1382 xscale_disable_single_step(target);
1383
1384 LOG_DEBUG("set breakpoint at 0x%8.8x", breakpoint->address);
1385 xscale_set_breakpoint(target, breakpoint);
1386 }
1387 }
1388
1389 /* enable any pending breakpoints and watchpoints */
1390 xscale_enable_breakpoints(target);
1391 xscale_enable_watchpoints(target);
1392
1393 /* restore banked registers */
1394 xscale_restore_context(target);
1395
1396 /* send resume request (command 0x30 or 0x31)
1397 * clean the trace buffer if it is to be enabled (0x62) */
1398 if (xscale->trace.buffer_enabled)
1399 {
1400 xscale_send_u32(target, 0x62);
1401 xscale_send_u32(target, 0x31);
1402 }
1403 else
1404 xscale_send_u32(target, 0x30);
1405
1406 /* send CPSR */
1407 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1408 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1409
1410 for (i = 7; i >= 0; i--)
1411 {
1412 /* send register */
1413 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1414 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1415 }
1416
1417 /* send PC */
1418 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1419 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1420
1421 target->debug_reason = DBG_REASON_NOTHALTED;
1422
1423 if (!debug_execution)
1424 {
1425 /* registers are now invalid */
1426 armv4_5_invalidate_core_regs(target);
1427 target->state = TARGET_RUNNING;
1428 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1429 }
1430 else
1431 {
1432 target->state = TARGET_DEBUG_RUNNING;
1433 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1434 }
1435
1436 LOG_DEBUG("target resumed");
1437
1438 xscale->handler_running = 1;
1439
1440 return ERROR_OK;
1441 }
1442
1443 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints)
1444 {
1445 armv4_5_common_t *armv4_5 = target->arch_info;
1446 xscale_common_t *xscale = armv4_5->arch_info;
1447 breakpoint_t *breakpoint = target->breakpoints;
1448
1449 u32 current_pc, next_pc;
1450 int i;
1451 int retval;
1452
1453 if (target->state != TARGET_HALTED)
1454 {
1455 LOG_WARNING("target not halted");
1456 return ERROR_TARGET_NOT_HALTED;
1457 }
1458
1459 /* current = 1: continue on current pc, otherwise continue at <address> */
1460 if (!current)
1461 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1462
1463 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1464
1465 /* if we're at the reset vector, we have to simulate the step */
1466 if (current_pc == 0x0)
1467 {
1468 arm_simulate_step(target, NULL);
1469 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1470
1471 target->debug_reason = DBG_REASON_SINGLESTEP;
1472 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1473
1474 return ERROR_OK;
1475 }
1476
1477 /* the front-end may request us not to handle breakpoints */
1478 if (handle_breakpoints)
1479 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1480 {
1481 xscale_unset_breakpoint(target, breakpoint);
1482 }
1483
1484 target->debug_reason = DBG_REASON_SINGLESTEP;
1485
1486 /* calculate PC of next instruction */
1487 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1488 {
1489 u32 current_opcode;
1490 target_read_u32(target, current_pc, &current_opcode);
1491 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1492 }
1493
1494 LOG_DEBUG("enable single-step");
1495 xscale_enable_single_step(target, next_pc);
1496
1497 /* restore banked registers */
1498 xscale_restore_context(target);
1499
1500 /* send resume request (command 0x30 or 0x31)
1501 * clean the trace buffer if it is to be enabled (0x62) */
1502 if (xscale->trace.buffer_enabled)
1503 {
1504 xscale_send_u32(target, 0x62);
1505 xscale_send_u32(target, 0x31);
1506 }
1507 else
1508 xscale_send_u32(target, 0x30);
1509
1510 /* send CPSR */
1511 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1512 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1513
1514 for (i = 7; i >= 0; i--)
1515 {
1516 /* send register */
1517 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1518 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1519 }
1520
1521 /* send PC */
1522 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1523 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1524
1525 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1526
1527 /* registers are now invalid */
1528 armv4_5_invalidate_core_regs(target);
1529
1530 /* wait for and process debug entry */
1531 xscale_debug_entry(target);
1532
1533 LOG_DEBUG("disable single-step");
1534 xscale_disable_single_step(target);
1535
1536 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1537
1538 if (breakpoint)
1539 {
1540 xscale_set_breakpoint(target, breakpoint);
1541 }
1542
1543 LOG_DEBUG("target stepped");
1544
1545 return ERROR_OK;
1546
1547 }
1548
1549 int xscale_assert_reset(target_t *target)
1550 {
1551 armv4_5_common_t *armv4_5 = target->arch_info;
1552 xscale_common_t *xscale = armv4_5->arch_info;
1553
1554 LOG_DEBUG("target->state: %s", target_state_strings[target->state]);
1555
1556 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
 1557 	 * end up in T-L-R, which would reset JTAG)
1558 */
1559 jtag_add_end_state(TAP_RTI);
1560 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
1561
1562 /* set Hold reset, Halt mode and Trap Reset */
1563 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1564 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1565 xscale_write_dcsr(target, 1, 0);
1566
1567 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1568 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, 0x7f);
1569 jtag_execute_queue();
1570
1571 /* assert reset */
1572 jtag_add_reset(0, 1);
1573
1574 /* sleep 1ms, to be sure we fulfill any requirements */
1575 jtag_add_sleep(1000);
1576 jtag_execute_queue();
1577
1578 target->state = TARGET_RESET;
1579
1580 return ERROR_OK;
1581 }
1582
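/* Deassert SRST.  While no debug handler is marked as installed, this also
 * downloads xscale/debug_handler.bin (at most 2 KiB, padded to 32-byte cache
 * lines) into the mini instruction cache at handler_address, installs the
 * exception vectors, and lets the core run into the handler; unless
 * reset_halt is requested the target is then transparently resumed.
 */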
1583 int xscale_deassert_reset(target_t *target)
1584 {
1585 armv4_5_common_t *armv4_5 = target->arch_info;
1586 xscale_common_t *xscale = armv4_5->arch_info;
1587
1588 fileio_t debug_handler;
1589 u32 address;
1590 u32 binary_size;
1591
1592 u32 buf_cnt;
1593 int i;
1594 int retval;
1595
1596 breakpoint_t *breakpoint = target->breakpoints;
1597
1598 LOG_DEBUG("-");
1599
1600 xscale->ibcr_available = 2;
1601 xscale->ibcr0_used = 0;
1602 xscale->ibcr1_used = 0;
1603
1604 xscale->dbr_available = 2;
1605 xscale->dbr0_used = 0;
1606 xscale->dbr1_used = 0;
1607
1608 /* mark all hardware breakpoints as unset */
1609 while (breakpoint)
1610 {
1611 if (breakpoint->type == BKPT_HARD)
1612 {
1613 breakpoint->set = 0;
1614 }
1615 breakpoint = breakpoint->next;
1616 }
1617
1618 if (!xscale->handler_installed)
1619 {
1620 /* release SRST */
1621 jtag_add_reset(0, 0);
1622
1623 /* wait 300ms; 150 and 100ms were not enough */
1624 jtag_add_sleep(300*1000);
1625
1626 jtag_add_runtest(2030, TAP_RTI);
1627 jtag_execute_queue();
1628
1629 /* set Hold reset, Halt mode and Trap Reset */
1630 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1631 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1632 xscale_write_dcsr(target, 1, 0);
1633
1634 /* Load debug handler */
1635 if (fileio_open(&debug_handler, "xscale/debug_handler.bin", FILEIO_READ, FILEIO_BINARY) != ERROR_OK)
1636 {
1637 return ERROR_OK;
1638 }
1639
1640 if ((binary_size = debug_handler.size) % 4)
1641 {
1642 LOG_ERROR("debug_handler.bin: size not a multiple of 4");
1643 exit(-1);
1644 }
1645
1646 if (binary_size > 0x800)
1647 {
1648 LOG_ERROR("debug_handler.bin: larger than 2kb");
1649 exit(-1);
1650 }
1651
1652 binary_size = CEIL(binary_size, 32) * 32;
1653
1654 address = xscale->handler_address;
1655 while (binary_size > 0)
1656 {
1657 u32 cache_line[8];
1658 u8 buffer[32];
1659
 1660 			if ((retval = fileio_read(&debug_handler, 32, buffer, &buf_cnt)) != ERROR_OK)
 1661 			{
 1662 				LOG_ERROR("reading debug handler failed");
 1663 			}
1664
1665 for (i = 0; i < buf_cnt; i += 4)
1666 {
1667 /* convert LE buffer to host-endian u32 */
1668 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1669 }
1670
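			/* pad a partial cache line with 0xe1a08008 (ARM "mov r8, r8",
			 * effectively a no-op) so a full 8-word line is always loaded */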
1671 for (; buf_cnt < 32; buf_cnt += 4)
1672 {
1673 cache_line[buf_cnt / 4] = 0xe1a08008;
1674 }
1675
1676 /* only load addresses other than the reset vectors */
1677 if ((address % 0x400) != 0x0)
1678 {
1679 xscale_load_ic(target, 1, address, cache_line);
1680 }
1681
1682 address += buf_cnt;
1683 binary_size -= buf_cnt;
 1684 		}
1685
1686 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
1687 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
1688
1689 jtag_add_runtest(30, TAP_RTI);
1690
1691 jtag_add_sleep(100000);
1692
1693 /* set Hold reset, Halt mode and Trap Reset */
1694 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1695 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1696 xscale_write_dcsr(target, 1, 0);
1697
1698 /* clear Hold reset to let the target run (should enter debug handler) */
1699 xscale_write_dcsr(target, 0, 1);
1700 target->state = TARGET_RUNNING;
1701
1702 if (!target->reset_halt)
1703 {
1704 jtag_add_sleep(10000);
1705
1706 /* we should have entered debug now */
1707 xscale_debug_entry(target);
1708 target->state = TARGET_HALTED;
1709
1710 /* resume the target */
1711 xscale_resume(target, 1, 0x0, 1, 0);
1712 }
1713
1714 fileio_close(&debug_handler);
1715 }
1716 else
1717 {
1718 jtag_add_reset(0, 0);
1719 }
1720
1721
1722 return ERROR_OK;
1723 }
1724
1725 int xscale_soft_reset_halt(struct target_s *target)
1726 {
1727
1728 return ERROR_OK;
1729 }
1730
1731 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode)
1732 {
1733
1734 return ERROR_OK;
1735 }
1736
1737 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value)
1738 {
1739
1740 return ERROR_OK;
1741 }
1742
1743 int xscale_full_context(target_t *target)
1744 {
1745 armv4_5_common_t *armv4_5 = target->arch_info;
1746
1747 u32 *buffer;
1748
1749 int i, j;
1750
1751 LOG_DEBUG("-");
1752
1753 if (target->state != TARGET_HALTED)
1754 {
1755 LOG_WARNING("target not halted");
1756 return ERROR_TARGET_NOT_HALTED;
1757 }
1758
1759 buffer = malloc(4 * 8);
1760
1761 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1762 * we can't enter User mode on an XScale (unpredictable),
1763 * but User shares registers with SYS
1764 */
1765 for(i = 1; i < 7; i++)
1766 {
1767 int valid = 1;
1768
1769 /* check if there are invalid registers in the current mode
1770 */
1771 for (j = 0; j <= 16; j++)
1772 {
1773 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1774 valid = 0;
1775 }
1776
1777 if (!valid)
1778 {
1779 u32 tmp_cpsr;
1780
1781 /* request banked registers */
1782 xscale_send_u32(target, 0x0);
1783
1784 tmp_cpsr = 0x0;
1785 tmp_cpsr |= armv4_5_number_to_mode(i);
1786 tmp_cpsr |= 0xc0; /* I/F bits */
1787
1788 /* send CPSR for desired mode */
1789 xscale_send_u32(target, tmp_cpsr);
1790
1791 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1792 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1793 {
1794 xscale_receive(target, buffer, 8);
 1795 				buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
1796 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1797 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1798 }
1799 else
1800 {
1801 xscale_receive(target, buffer, 7);
1802 }
1803
1804 /* move data from buffer to register cache */
1805 for (j = 8; j <= 14; j++)
1806 {
1807 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1808 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1809 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1810 }
1811 }
1812 }
1813
1814 free(buffer);
1815
1816 return ERROR_OK;
1817 }
1818
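/* Write dirty banked registers (r8-r14 and, outside USR/SYS, the SPSR) back
 * to the target.  For every mode with dirty entries the debug handler is sent
 * command 0x1, a CPSR value selecting that mode with IRQ/FIQ masked, and then
 * the register values themselves.
 */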
1819 int xscale_restore_context(target_t *target)
1820 {
1821 armv4_5_common_t *armv4_5 = target->arch_info;
1822
1823 int i, j;
1824
1825 LOG_DEBUG("-");
1826
1827 if (target->state != TARGET_HALTED)
1828 {
1829 LOG_WARNING("target not halted");
1830 return ERROR_TARGET_NOT_HALTED;
1831 }
1832
1833 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1834 * we can't enter User mode on an XScale (unpredictable),
1835 * but User shares registers with SYS
1836 */
1837 for(i = 1; i < 7; i++)
1838 {
1839 int dirty = 0;
1840
 1841 		/* check if there are dirty registers in the current mode
1842 */
1843 for (j = 8; j <= 14; j++)
1844 {
1845 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1846 dirty = 1;
1847 }
1848
1849 /* if not USR/SYS, check if the SPSR needs to be written */
1850 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1851 {
1852 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1853 dirty = 1;
1854 }
1855
1856 if (dirty)
1857 {
1858 u32 tmp_cpsr;
1859
1860 /* send banked registers */
1861 xscale_send_u32(target, 0x1);
1862
1863 tmp_cpsr = 0x0;
1864 tmp_cpsr |= armv4_5_number_to_mode(i);
1865 tmp_cpsr |= 0xc0; /* I/F bits */
1866
1867 /* send CPSR for desired mode */
1868 xscale_send_u32(target, tmp_cpsr);
1869
1870 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1871 for (j = 8; j <= 14; j++)
1872 {
 1873 				xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
1874 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1875 }
1876
1877 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1878 {
 1879 				xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
1880 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1881 }
1882 }
1883 }
1884
1885 return ERROR_OK;
1886 }
1887
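/* Read memory through the debug handler: command 0x1n (n = access size in
 * bytes), followed by the base address and the element count.  The handler
 * returns one 32-bit word per element; afterwards DCSR is re-read and a set
 * Sticky Abort bit is cleared and reported as ERROR_TARGET_DATA_ABORT.
 */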
1888 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1889 {
1890 armv4_5_common_t *armv4_5 = target->arch_info;
1891 xscale_common_t *xscale = armv4_5->arch_info;
1892 u32 *buf32;
1893 int i;
1894 int retval;
1895
1896 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1897
1898 if (target->state != TARGET_HALTED)
1899 {
1900 LOG_WARNING("target not halted");
1901 return ERROR_TARGET_NOT_HALTED;
1902 }
1903
1904 /* sanitize arguments */
1905 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1906 return ERROR_INVALID_ARGUMENTS;
1907
1908 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1909 return ERROR_TARGET_UNALIGNED_ACCESS;
1910
1911 /* send memory read request (command 0x1n, n: access size) */
1912 if ((retval=xscale_send_u32(target, 0x10 | size))!=ERROR_OK)
1913 return retval;
1914
1915 /* send base address for read request */
1916 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
1917 return retval;
1918
1919 /* send number of requested data words */
1920 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
1921 return retval;
1922
1923 /* receive data from target (count times 32-bit words in host endianness) */
1924 buf32 = malloc(4 * count);
1925 if ((retval=xscale_receive(target, buf32, count))!=ERROR_OK)
1926 return retval;
1927
1928 /* extract data from host-endian buffer into byte stream */
1929 for (i = 0; i < count; i++)
1930 {
1931 switch (size)
1932 {
1933 case 4:
1934 target_buffer_set_u32(target, buffer, buf32[i]);
1935 buffer += 4;
1936 break;
1937 case 2:
1938 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1939 buffer += 2;
1940 break;
1941 case 1:
1942 *buffer++ = buf32[i] & 0xff;
1943 break;
1944 default:
1945 LOG_ERROR("should never get here");
1946 exit(-1);
1947 }
1948 }
1949
1950 free(buf32);
1951
1952 /* examine DCSR, to see if Sticky Abort (SA) got set */
1953 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
1954 return retval;
1955 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1956 {
1957 /* clear SA bit */
1958 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
1959 return retval;
1960
1961 return ERROR_TARGET_DATA_ABORT;
1962 }
1963
1964 return ERROR_OK;
1965 }
1966
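/* Write target memory through the debug handler: command 0x2n (n = access
 * size in bytes), base address and element count, followed by the data
 * itself. As with reads, DCSR's Sticky Abort bit reports any data abort. */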
1967 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1968 {
1969 armv4_5_common_t *armv4_5 = target->arch_info;
1970 xscale_common_t *xscale = armv4_5->arch_info;
1971 int retval;
1972
1973 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1974
1975 if (target->state != TARGET_HALTED)
1976 {
1977 LOG_WARNING("target not halted");
1978 return ERROR_TARGET_NOT_HALTED;
1979 }
1980
1981 /* sanitize arguments */
1982 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1983 return ERROR_INVALID_ARGUMENTS;
1984
1985 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1986 return ERROR_TARGET_UNALIGNED_ACCESS;
1987
1988 /* send memory write request (command 0x2n, n: access size) */
1989 if ((retval=xscale_send_u32(target, 0x20 | size))!=ERROR_OK)
1990 return retval;
1991
1992 /* send base address for write request */
1993 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
1994 return retval;
1995
1996 /* send number of requested data words to be written*/
1997 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
1998 return retval;
1999
2000 /* extract data from host-endian buffer into byte stream */
2001 #if 0
2002 for (i = 0; i < count; i++)
2003 {
2004 switch (size)
2005 {
2006 case 4:
2007 value = target_buffer_get_u32(target, buffer);
2008 xscale_send_u32(target, value);
2009 buffer += 4;
2010 break;
2011 case 2:
2012 value = target_buffer_get_u16(target, buffer);
2013 xscale_send_u32(target, value);
2014 buffer += 2;
2015 break;
2016 case 1:
2017 value = *buffer;
2018 xscale_send_u32(target, value);
2019 buffer += 1;
2020 break;
2021 default:
2022 LOG_ERROR("should never get here");
2023 exit(-1);
2024 }
2025 }
2026 #endif
2027 if ((retval=xscale_send(target, buffer, count, size))!=ERROR_OK)
2028 return retval;
2029
2030 /* examine DCSR, to see if Sticky Abort (SA) got set */
2031 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
2032 return retval;
2033 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2034 {
2035 /* clear SA bit */
2036 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
2037 return retval;
2038
2039 return ERROR_TARGET_DATA_ABORT;
2040 }
2041
2042 return ERROR_OK;
2043 }
2044
2045 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer)
2046 {
2047 return xscale_write_memory(target, address, 4, count, buffer);
2048 }
2049
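/* Return the translation table base (CP15 TTB) so the generic ARMv4/5 MMU
 * code can walk the page tables for virtual-to-physical translation. */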
2050 u32 xscale_get_ttb(target_t *target)
2051 {
2052 armv4_5_common_t *armv4_5 = target->arch_info;
2053 xscale_common_t *xscale = armv4_5->arch_info;
2054 u32 ttb;
2055
2056 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2057 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2058
2059 return ttb;
2060 }
2061
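/* Disable the MMU and/or caches by clearing the corresponding CP15 control
 * bits. Before disabling the data cache it is cleaned (command 0x50 with the
 * reserved cache_clean_address) and invalidated (0x51); the instruction
 * cache is invalidated with 0x52, and 0x53 issues a cpwait barrier. */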
2062 void xscale_disable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2063 {
2064 armv4_5_common_t *armv4_5 = target->arch_info;
2065 xscale_common_t *xscale = armv4_5->arch_info;
2066 u32 cp15_control;
2067
2068 /* read cp15 control register */
2069 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2070 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2071
2072 if (mmu)
2073 cp15_control &= ~0x1U;
2074
2075 if (d_u_cache)
2076 {
2077 /* clean DCache */
2078 xscale_send_u32(target, 0x50);
2079 xscale_send_u32(target, xscale->cache_clean_address);
2080
2081 /* invalidate DCache */
2082 xscale_send_u32(target, 0x51);
2083
2084 cp15_control &= ~0x4U;
2085 }
2086
2087 if (i_cache)
2088 {
2089 /* invalidate ICache */
2090 xscale_send_u32(target, 0x52);
2091 cp15_control &= ~0x1000U;
2092 }
2093
2094 /* write new cp15 control register */
2095 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2096
2097 /* execute cpwait to ensure outstanding operations complete */
2098 xscale_send_u32(target, 0x53);
2099 }
2100
2101 void xscale_enable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2102 {
2103 armv4_5_common_t *armv4_5 = target->arch_info;
2104 xscale_common_t *xscale = armv4_5->arch_info;
2105 u32 cp15_control;
2106
2107 /* read cp15 control register */
2108 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2109 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2110
2111 if (mmu)
2112 cp15_control |= 0x1U;
2113
2114 if (d_u_cache)
2115 cp15_control |= 0x4U;
2116
2117 if (i_cache)
2118 cp15_control |= 0x1000U;
2119
2120 /* write new cp15 control register */
2121 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2122
2123 /* execute cpwait to ensure outstanding operations complete */
2124 xscale_send_u32(target, 0x53);
2125 }
2126
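/* Install a breakpoint. Hardware breakpoints use one of the two IBCR
 * comparators (address with bit 0 set to enable); software breakpoints
 * save the original opcode and overwrite it with the ARM or Thumb BKPT
 * instruction. */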
2127 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2128 {
2129 armv4_5_common_t *armv4_5 = target->arch_info;
2130 xscale_common_t *xscale = armv4_5->arch_info;
2131
2132 if (target->state != TARGET_HALTED)
2133 {
2134 LOG_WARNING("target not halted");
2135 return ERROR_TARGET_NOT_HALTED;
2136 }
2137
2138 if (xscale->force_hw_bkpts)
2139 breakpoint->type = BKPT_HARD;
2140
2141 if (breakpoint->set)
2142 {
2143 LOG_WARNING("breakpoint already set");
2144 return ERROR_OK;
2145 }
2146
2147 if (breakpoint->type == BKPT_HARD)
2148 {
2149 u32 value = breakpoint->address | 1;
2150 if (!xscale->ibcr0_used)
2151 {
2152 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2153 xscale->ibcr0_used = 1;
2154 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2155 }
2156 else if (!xscale->ibcr1_used)
2157 {
2158 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2159 xscale->ibcr1_used = 1;
2160 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2161 }
2162 else
2163 {
2164 LOG_ERROR("BUG: no hardware comparator available");
2165 return ERROR_OK;
2166 }
2167 }
2168 else if (breakpoint->type == BKPT_SOFT)
2169 {
2170 if (breakpoint->length == 4)
2171 {
2172 /* keep the original instruction in target endianness */
2173 target->type->read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2174 /* replace it with the ARM breakpoint instruction (xscale->arm_bkpt is host endian) */
2175 target_write_u32(target, breakpoint->address, xscale->arm_bkpt);
2176 }
2177 else
2178 {
2179 /* keep the original instruction in target endianness */
2180 target->type->read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2181 /* replace it with the Thumb breakpoint instruction (xscale->thumb_bkpt is host endian) */
2182 target_write_u16(target, breakpoint->address, xscale->thumb_bkpt);
2183 }
2184 breakpoint->set = 1;
2185 }
2186
2187 return ERROR_OK;
2188
2189 }
2190
2191 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2192 {
2193 armv4_5_common_t *armv4_5 = target->arch_info;
2194 xscale_common_t *xscale = armv4_5->arch_info;
2195
2196 if (target->state != TARGET_HALTED)
2197 {
2198 LOG_WARNING("target not halted");
2199 return ERROR_TARGET_NOT_HALTED;
2200 }
2201
2202 if (xscale->force_hw_bkpts)
2203 {
2204 LOG_DEBUG("forcing use of hardware breakpoint at address 0x%8.8x", breakpoint->address);
2205 breakpoint->type = BKPT_HARD;
2206 }
2207
2208 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2209 {
2210 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2211 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2212 }
2213 else if (breakpoint->type == BKPT_HARD)
2214 {
2215 xscale->ibcr_available--;
2216 }
2217
2218 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2219 {
2220 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2221 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2222 }
2223
2224 return ERROR_OK;
2225 }
2226
2227 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2228 {
2229 armv4_5_common_t *armv4_5 = target->arch_info;
2230 xscale_common_t *xscale = armv4_5->arch_info;
2231
2232 if (target->state != TARGET_HALTED)
2233 {
2234 LOG_WARNING("target not halted");
2235 return ERROR_TARGET_NOT_HALTED;
2236 }
2237
2238 if (!breakpoint->set)
2239 {
2240 LOG_WARNING("breakpoint not set");
2241 return ERROR_OK;
2242 }
2243
2244 if (breakpoint->type == BKPT_HARD)
2245 {
2246 if (breakpoint->set == 1)
2247 {
2248 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2249 xscale->ibcr0_used = 0;
2250 }
2251 else if (breakpoint->set == 2)
2252 {
2253 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2254 xscale->ibcr1_used = 0;
2255 }
2256 breakpoint->set = 0;
2257 }
2258 else
2259 {
2260 /* restore original instruction (kept in target endianness) */
2261 if (breakpoint->length == 4)
2262 {
2263 target->type->write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2264 }
2265 else
2266 {
2267 target->type->write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2268 }
2269 breakpoint->set = 0;
2270 }
2271
2272 return ERROR_OK;
2273 }
2274
2275 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2276 {
2277 armv4_5_common_t *armv4_5 = target->arch_info;
2278 xscale_common_t *xscale = armv4_5->arch_info;
2279
2280 if (target->state != TARGET_HALTED)
2281 {
2282 LOG_WARNING("target not halted");
2283 return ERROR_TARGET_NOT_HALTED;
2284 }
2285
2286 if (breakpoint->set)
2287 {
2288 xscale_unset_breakpoint(target, breakpoint);
2289 }
2290
2291 if (breakpoint->type == BKPT_HARD)
2292 xscale->ibcr_available++;
2293
2294 return ERROR_OK;
2295 }
2296
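/* Install a watchpoint using one of the two DBR address comparators.
 * DBCON holds a two-bit enable field per comparator (bits 1:0 for DBR0,
 * bits 3:2 for DBR1) selecting write-only, any-access or read-only
 * matching. */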
2297 int xscale_set_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2298 {
2299 armv4_5_common_t *armv4_5 = target->arch_info;
2300 xscale_common_t *xscale = armv4_5->arch_info;
2301 u8 enable=0;
2302 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2303 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2304
2305 if (target->state != TARGET_HALTED)
2306 {
2307 LOG_WARNING("target not halted");
2308 return ERROR_TARGET_NOT_HALTED;
2309 }
2310
2311 xscale_get_reg(dbcon);
2312
2313 switch (watchpoint->rw)
2314 {
2315 case WPT_READ:
2316 enable = 0x3;
2317 break;
2318 case WPT_ACCESS:
2319 enable = 0x2;
2320 break;
2321 case WPT_WRITE:
2322 enable = 0x1;
2323 break;
2324 default:
2325 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2326 }
2327
2328 if (!xscale->dbr0_used)
2329 {
2330 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2331 dbcon_value |= enable;
2332 xscale_set_reg_u32(dbcon, dbcon_value);
2333 watchpoint->set = 1;
2334 xscale->dbr0_used = 1;
2335 }
2336 else if (!xscale->dbr1_used)
2337 {
2338 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2339 dbcon_value |= enable << 2;
2340 xscale_set_reg_u32(dbcon, dbcon_value);
2341 watchpoint->set = 2;
2342 xscale->dbr1_used = 1;
2343 }
2344 else
2345 {
2346 LOG_ERROR("BUG: no hardware comparator available");
2347 return ERROR_OK;
2348 }
2349
2350 return ERROR_OK;
2351 }
2352
2353 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2354 {
2355 armv4_5_common_t *armv4_5 = target->arch_info;
2356 xscale_common_t *xscale = armv4_5->arch_info;
2357
2358 if (target->state != TARGET_HALTED)
2359 {
2360 LOG_WARNING("target not halted");
2361 return ERROR_TARGET_NOT_HALTED;
2362 }
2363
2364 if (xscale->dbr_available < 1)
2365 {
2366 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2367 }
2368
2369 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2370 {
2371 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2372 }
2373
2374 xscale->dbr_available--;
2375
2376 return ERROR_OK;
2377 }
2378
2379 int xscale_unset_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2380 {
2381 armv4_5_common_t *armv4_5 = target->arch_info;
2382 xscale_common_t *xscale = armv4_5->arch_info;
2383 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2384 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2385
2386 if (target->state != TARGET_HALTED)
2387 {
2388 LOG_WARNING("target not halted");
2389 return ERROR_TARGET_NOT_HALTED;
2390 }
2391
2392 if (!watchpoint->set)
2393 {
2394 LOG_WARNING("breakpoint not set");
2395 return ERROR_OK;
2396 }
2397
2398 if (watchpoint->set == 1)
2399 {
2400 dbcon_value &= ~0x3;
2401 xscale_set_reg_u32(dbcon, dbcon_value);
2402 xscale->dbr0_used = 0;
2403 }
2404 else if (watchpoint->set == 2)
2405 {
2406 dbcon_value &= ~0xc;
2407 xscale_set_reg_u32(dbcon, dbcon_value);
2408 xscale->dbr1_used = 0;
2409 }
2410 watchpoint->set = 0;
2411
2412 return ERROR_OK;
2413 }
2414
2415 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2416 {
2417 armv4_5_common_t *armv4_5 = target->arch_info;
2418 xscale_common_t *xscale = armv4_5->arch_info;
2419
2420 if (target->state != TARGET_HALTED)
2421 {
2422 LOG_WARNING("target not halted");
2423 return ERROR_TARGET_NOT_HALTED;
2424 }
2425
2426 if (watchpoint->set)
2427 {
2428 xscale_unset_watchpoint(target, watchpoint);
2429 }
2430
2431 xscale->dbr_available++;
2432
2433 return ERROR_OK;
2434 }
2435
2436 void xscale_enable_watchpoints(struct target_s *target)
2437 {
2438 watchpoint_t *watchpoint = target->watchpoints;
2439
2440 while (watchpoint)
2441 {
2442 if (watchpoint->set == 0)
2443 xscale_set_watchpoint(target, watchpoint);
2444 watchpoint = watchpoint->next;
2445 }
2446 }
2447
2448 void xscale_enable_breakpoints(struct target_s *target)
2449 {
2450 breakpoint_t *breakpoint = target->breakpoints;
2451
2452 /* set any pending breakpoints */
2453 while (breakpoint)
2454 {
2455 if (breakpoint->set == 0)
2456 xscale_set_breakpoint(target, breakpoint);
2457 breakpoint = breakpoint->next;
2458 }
2459 }
2460
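/* Read an XScale-specific register. DCSR, TX and RX live in JTAG data
 * registers and are accessed directly; every other debug register is
 * fetched by the debug handler (command 0x40 plus the register's handler
 * number), with the result arriving via TX. */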
2461 int xscale_get_reg(reg_t *reg)
2462 {
2463 xscale_reg_t *arch_info = reg->arch_info;
2464 target_t *target = arch_info->target;
2465 armv4_5_common_t *armv4_5 = target->arch_info;
2466 xscale_common_t *xscale = armv4_5->arch_info;
2467
2468 /* DCSR, TX and RX are accessible via JTAG */
2469 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2470 {
2471 return xscale_read_dcsr(arch_info->target);
2472 }
2473 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2474 {
2475 /* 1 = consume register content */
2476 return xscale_read_tx(arch_info->target, 1);
2477 }
2478 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2479 {
2480 /* can't read from RX register (host -> debug handler) */
2481 return ERROR_OK;
2482 }
2483 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2484 {
2485 /* can't (explicitly) read from TXRXCTRL register */
2486 return ERROR_OK;
2487 }
2488 else /* Other DBG registers have to be transferred by the debug handler */
2489 {
2490 /* send CP read request (command 0x40) */
2491 xscale_send_u32(target, 0x40);
2492
2493 /* send CP register number */
2494 xscale_send_u32(target, arch_info->dbg_handler_number);
2495
2496 /* read register value */
2497 xscale_read_tx(target, 1);
2498 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2499
2500 reg->dirty = 0;
2501 reg->valid = 1;
2502 }
2503
2504 return ERROR_OK;
2505 }
2506
2507 int xscale_set_reg(reg_t *reg, u8* buf)
2508 {
2509 xscale_reg_t *arch_info = reg->arch_info;
2510 target_t *target = arch_info->target;
2511 armv4_5_common_t *armv4_5 = target->arch_info;
2512 xscale_common_t *xscale = armv4_5->arch_info;
2513 u32 value = buf_get_u32(buf, 0, 32);
2514
2515 /* DCSR, TX and RX are accessible via JTAG */
2516 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2517 {
2518 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2519 return xscale_write_dcsr(arch_info->target, -1, -1);
2520 }
2521 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2522 {
2523 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2524 return xscale_write_rx(arch_info->target);
2525 }
2526 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2527 {
2528 /* can't write to TX register (debug-handler -> host) */
2529 return ERROR_OK;
2530 }
2531 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2532 {
2533 /* can't (explicitly) write to TXRXCTRL register */
2534 return ERROR_OK;
2535 }
2536 else /* Other DBG registers have to be transferred by the debug handler */
2537 {
2538 /* send CP write request (command 0x41) */
2539 xscale_send_u32(target, 0x41);
2540
2541 /* send CP register number */
2542 xscale_send_u32(target, arch_info->dbg_handler_number);
2543
2544 /* send CP register value */
2545 xscale_send_u32(target, value);
2546 buf_set_u32(reg->value, 0, 32, value);
2547 }
2548
2549 return ERROR_OK;
2550 }
2551
2552 /* convenience wrapper to access XScale specific registers */
2553 int xscale_set_reg_u32(reg_t *reg, u32 value)
2554 {
2555 u8 buf[4];
2556
2557 buf_set_u32(buf, 0, 32, value);
2558
2559 return xscale_set_reg(reg, buf);
2560 }
2561
2562 int xscale_write_dcsr_sw(target_t *target, u32 value)
2563 {
2564 /* get pointers to arch-specific information */
2565 armv4_5_common_t *armv4_5 = target->arch_info;
2566 xscale_common_t *xscale = armv4_5->arch_info;
2567 reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2568 xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
2569
2570 /* send CP write request (command 0x41) */
2571 xscale_send_u32(target, 0x41);
2572
2573 /* send CP register number */
2574 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2575
2576 /* send CP register value */
2577 xscale_send_u32(target, value);
2578 buf_set_u32(dcsr->value, 0, 32, value);
2579
2580 return ERROR_OK;
2581 }
2582
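/* Fetch the trace buffer via debug handler command 0x61. The reply holds
 * 258 words: 256 trace entries plus the two checkpoint registers. Entries
 * are scanned backwards so the four address bytes that follow an indirect
 * (or checkpointed indirect) branch message can be flagged as addresses
 * rather than trace messages. */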
2583 int xscale_read_trace(target_t *target)
2584 {
2585 /* get pointers to arch-specific information */
2586 armv4_5_common_t *armv4_5 = target->arch_info;
2587 xscale_common_t *xscale = armv4_5->arch_info;
2588 xscale_trace_data_t **trace_data_p;
2589
2590 /* 258 words from debug handler
2591 * 256 trace buffer entries
2592 * 2 checkpoint addresses
2593 */
2594 u32 trace_buffer[258];
2595 int is_address[256];
2596 int i, j;
2597
2598 if (target->state != TARGET_HALTED)
2599 {
2600 LOG_WARNING("target must be stopped to read trace data");
2601 return ERROR_TARGET_NOT_HALTED;
2602 }
2603
2604 /* send read trace buffer command (command 0x61) */
2605 xscale_send_u32(target, 0x61);
2606
2607 /* receive trace buffer content */
2608 xscale_receive(target, trace_buffer, 258);
2609
2610 /* parse buffer backwards to identify address entries */
2611 for (i = 255; i >= 0; i--)
2612 {
2613 is_address[i] = 0;
2614 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2615 ((trace_buffer[i] & 0xf0) == 0xd0))
2616 {
2617 if (i >= 3)
2618 is_address[--i] = 1;
2619 if (i >= 2)
2620 is_address[--i] = 1;
2621 if (i >= 1)
2622 is_address[--i] = 1;
2623 if (i >= 0)
2624 is_address[--i] = 1;
2625 }
2626 }
2627
2629 /* search first non-zero entry */
2630 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2631 ;
2632
2633 if (j == 256)
2634 {
2635 LOG_DEBUG("no trace data collected");
2636 return ERROR_XSCALE_NO_TRACE_DATA;
2637 }
2638
2639 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2640 ;
2641
2642 *trace_data_p = malloc(sizeof(xscale_trace_data_t));
2643 (*trace_data_p)->next = NULL;
2644 (*trace_data_p)->chkpt0 = trace_buffer[256];
2645 (*trace_data_p)->chkpt1 = trace_buffer[257];
2646 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2647 (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
2648 (*trace_data_p)->depth = 256 - j;
2649
2650 for (i = j; i < 256; i++)
2651 {
2652 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2653 if (is_address[i])
2654 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2655 else
2656 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2657 }
2658
2659 return ERROR_OK;
2660 }
2661
2662 int xscale_read_instruction(target_t *target, arm_instruction_t *instruction)
2663 {
2664 /* get pointers to arch-specific information */
2665 armv4_5_common_t *armv4_5 = target->arch_info;
2666 xscale_common_t *xscale = armv4_5->arch_info;
2667 int i;
2668 int section = -1;
2669 u32 size_read;
2670 u32 opcode;
2671 int retval;
2672
2673 if (!xscale->trace.image)
2674 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2675
2676 /* search for the section the current instruction belongs to */
2677 for (i = 0; i < xscale->trace.image->num_sections; i++)
2678 {
2679 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2680 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2681 {
2682 section = i;
2683 break;
2684 }
2685 }
2686
2687 if (section == -1)
2688 {
2689 /* current instruction couldn't be found in the image */
2690 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2691 }
2692
2693 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2694 {
2695 u8 buf[4];
2696 if ((retval = image_read_section(xscale->trace.image, section,
2697 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2698 4, buf, &size_read)) != ERROR_OK)
2699 {
2700 LOG_ERROR("error while reading instruction: %i", retval);
2701 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2702 }
2703 opcode = target_buffer_get_u32(target, buf);
2704 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2705 }
2706 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2707 {
2708 u8 buf[2];
2709 if ((retval = image_read_section(xscale->trace.image, section,
2710 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2711 2, buf, &size_read)) != ERROR_OK)
2712 {
2713 LOG_ERROR("error while reading instruction: %i", retval);
2714 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2715 }
2716 opcode = target_buffer_get_u16(target, buf);
2717 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2718 }
2719 else
2720 {
2721 LOG_ERROR("BUG: unknown core state encountered");
2722 exit(-1);
2723 }
2724
2725 return ERROR_OK;
2726 }
2727
2728 int xscale_branch_address(xscale_trace_data_t *trace_data, int i, u32 *target)
2729 {
2730 /* if there are less than four entries prior to the indirect branch message
2731 * we can't extract the address */
2732 if (i < 4)
2733 {
2734 return -1;
2735 }
2736
2737 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2738 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2739
2740 return 0;
2741 }
2742
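/* Reconstruct the executed instruction stream from the collected trace data.
 * Each trace message encodes an instruction count in its low nibble (plus 16
 * per preceding roll-over message); branch targets are recovered from the
 * checkpoint registers or the address bytes of indirect branch messages, and
 * instructions are disassembled from the image loaded with 'xscale trace_image'. */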
2743 int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
2744 {
2745 /* get pointers to arch-specific information */
2746 armv4_5_common_t *armv4_5 = target->arch_info;
2747 xscale_common_t *xscale = armv4_5->arch_info;
2748 int next_pc_ok = 0;
2749 u32 next_pc = 0x0;
2750 xscale_trace_data_t *trace_data = xscale->trace.data;
2751 int retval;
2752
2753 while (trace_data)
2754 {
2755 int i, chkpt;
2756 int rollover;
2757 int branch;
2758 int exception;
2759 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2760
2761 chkpt = 0;
2762 rollover = 0;
2763
2764 for (i = 0; i < trace_data->depth; i++)
2765 {
2766 next_pc_ok = 0;
2767 branch = 0;
2768 exception = 0;
2769
2770 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2771 continue;
2772
2773 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2774 {
2775 case 0: /* Exceptions */
2776 case 1:
2777 case 2:
2778 case 3:
2779 case 4:
2780 case 5:
2781 case 6:
2782 case 7:
2783 exception = (trace_data->entries[i].data & 0x70) >> 4;
2784 next_pc_ok = 1;
2785 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2786 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2787 break;
2788 case 8: /* Direct Branch */
2789 branch = 1;
2790 break;
2791 case 9: /* Indirect Branch */
2792 branch = 1;
2793 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2794 {
2795 next_pc_ok = 1;
2796 }
2797 break;
2798 case 13: /* Checkpointed Indirect Branch */
2799 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2800 {
2801 next_pc_ok = 1;
2802 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2803 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2804 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2805 }
2806 /* explicit fall-through */
2807 case 12: /* Checkpointed Direct Branch */
2808 branch = 1;
2809 if (chkpt == 0)
2810 {
2811 next_pc_ok = 1;
2812 next_pc = trace_data->chkpt0;
2813 chkpt++;
2814 }
2815 else if (chkpt == 1)
2816 {
2817 next_pc_ok = 1;
2818 next_pc = trace_data->chkpt1;
2819 chkpt++;
2820 }
2821 else
2822 {
2823 LOG_WARNING("more than two checkpointed branches encountered");
2824 }
2825 break;
2826 case 15: /* Roll-over */
2827 rollover++;
2828 continue;
2829 default: /* Reserved */
2830 command_print(cmd_ctx, "--- reserved trace message ---");
2831 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2832 return ERROR_OK;
2833 }
2834
2835 if (xscale->trace.pc_ok)
2836 {
2837 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2838 arm_instruction_t instruction;
2839
2840 if ((exception == 6) || (exception == 7))
2841 {
2842 /* IRQ or FIQ exception, no instruction executed */
2843 executed -= 1;
2844 }
2845
2846 while (executed-- >= 0)
2847 {
2848 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2849 {
2850 /* can't continue tracing with no image available */
2851 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2852 {
2853 return retval;
2854 }
2855 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2856 {
2857 /* TODO: handle incomplete images */
2858 }
2859 }
2860
2861 /* a precise abort on a load to the PC is included in the incremental
2862 * word count, other instructions causing data aborts are not included
2863 */
2864 if ((executed == 0) && (exception == 4)
2865 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2866 {
2867 if ((instruction.type == ARM_LDM)
2868 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2869 {
2870 executed--;
2871 }
2872 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2873 && (instruction.info.load_store.Rd != 15))
2874 {
2875 executed--;
2876 }
2877 }
2878
2879 /* only the last instruction executed
2880 * (the one that caused the control flow change)
2881 * could be a taken branch
2882 */
2883 if (((executed == -1) && (branch == 1)) &&
2884 (((instruction.type == ARM_B) ||
2885 (instruction.type == ARM_BL) ||
2886 (instruction.type == ARM_BLX)) &&
2887 (instruction.info.b_bl_bx_blx.target_address != -1)))
2888 {
2889 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2890 }
2891 else
2892 {
2893 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2894 }
2895 command_print(cmd_ctx, "%s", instruction.text);
2896 }
2897
2898 rollover = 0;
2899 }
2900
2901 if (next_pc_ok)
2902 {
2903 xscale->trace.current_pc = next_pc;
2904 xscale->trace.pc_ok = 1;
2905 }
2906 }
2907
2908 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2909 {
2910 arm_instruction_t instruction;
2911 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2912 {
2913 /* can't continue tracing with no image available */
2914 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2915 {
2916 return retval;
2917 }
2918 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2919 {
2920 /* TODO: handle incomplete images */
2921 }
2922 }
2923 command_print(cmd_ctx, "%s", instruction.text);
2924 }
2925
2926 trace_data = trace_data->next;
2927 }
2928
2929 return ERROR_OK;
2930 }
2931
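/* Build the register cache: the common ARMv4/5 core registers first, then a
 * second cache holding the XScale debug registers accessed through
 * xscale_get_reg()/xscale_set_reg(). */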
2932 void xscale_build_reg_cache(target_t *target)
2933 {
2934 /* get pointers to arch-specific information */
2935 armv4_5_common_t *armv4_5 = target->arch_info;
2936 xscale_common_t *xscale = armv4_5->arch_info;
2937
2938 reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
2939 xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
2940 int i;
2941 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);
2942
2943 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2944 armv4_5->core_cache = (*cache_p);
2945
2946 /* register a register arch-type for XScale dbg registers only once */
2947 if (xscale_reg_arch_type == -1)
2948 xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
2949
2950 (*cache_p)->next = malloc(sizeof(reg_cache_t));
2951 cache_p = &(*cache_p)->next;
2952
2953 /* fill in values for the xscale reg cache */
2954 (*cache_p)->name = "XScale registers";
2955 (*cache_p)->next = NULL;
2956 (*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
2957 (*cache_p)->num_regs = num_regs;
2958
2959 for (i = 0; i < num_regs; i++)
2960 {
2961 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2962 (*cache_p)->reg_list[i].value = calloc(4, 1);
2963 (*cache_p)->reg_list[i].dirty = 0;
2964 (*cache_p)->reg_list[i].valid = 0;
2965 (*cache_p)->reg_list[i].size = 32;
2966 (*cache_p)->reg_list[i].bitfield_desc = NULL;
2967 (*cache_p)->reg_list[i].num_bitfields = 0;
2968 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2969 (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
2970 arch_info[i] = xscale_reg_arch_info[i];
2971 arch_info[i].target = target;
2972 }
2973
2974 xscale->reg_cache = (*cache_p);
2975 }
2976
2977 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target)
2978 {
2979 return ERROR_OK;
2980 }
2981
2982 int xscale_quit()
2983 {
2984
2985 return ERROR_OK;
2986 }
2987
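/* One-time per-target setup: JTAG instruction codes and IR length for the
 * selected variant, the default debug handler address (0xfe000800), reset
 * vectors branching into the handler, the cache-clean region, and the
 * breakpoint/watchpoint resource counters. */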
2988 int xscale_init_arch_info(target_t *target, xscale_common_t *xscale, int chain_pos, char *variant)
2989 {
2990 armv4_5_common_t *armv4_5;
2991 u32 high_reset_branch, low_reset_branch;
2992 int i;
2993
2994 armv4_5 = &xscale->armv4_5_common;
2995
2996 /* store architecture specific data (none so far) */
2997 xscale->arch_info = NULL;
2998 xscale->common_magic = XSCALE_COMMON_MAGIC;
2999
3000 /* remember the variant (PXA25x, PXA27x, IXP42x, ...) */
3001 xscale->variant = strdup(variant);
3002
3003 /* prepare JTAG information for the new target */
3004 xscale->jtag_info.chain_pos = chain_pos;
3005
3006 xscale->jtag_info.dbgrx = 0x02;
3007 xscale->jtag_info.dbgtx = 0x10;
3008 xscale->jtag_info.dcsr = 0x09;
3009 xscale->jtag_info.ldic = 0x07;
3010
3011 if ((strcmp(xscale->variant, "pxa250") == 0) ||
3012 (strcmp(xscale->variant, "pxa255") == 0) ||
3013 (strcmp(xscale->variant, "pxa26x") == 0))
3014 {
3015 xscale->jtag_info.ir_length = 5;
3016 }
3017 else if ((strcmp(xscale->variant, "pxa27x") == 0) ||
3018 (strcmp(xscale->variant, "ixp42x") == 0) ||
3019 (strcmp(xscale->variant, "ixp45x") == 0) ||
3020 (strcmp(xscale->variant, "ixp46x") == 0))
3021 {
3022 xscale->jtag_info.ir_length = 7;
3023 }
3024
3025 /* the debug handler isn't installed (and thus not running) at this time */
3026 xscale->handler_installed = 0;
3027 xscale->handler_running = 0;
3028 xscale->handler_address = 0xfe000800;
3029
3030 /* clear the vectors we keep locally for reference */
3031 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3032 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3033
3034 /* no user-specified vectors have been configured yet */
3035 xscale->static_low_vectors_set = 0x0;
3036 xscale->static_high_vectors_set = 0x0;
3037
3038 /* calculate branches to debug handler */
3039 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3040 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
3041
3042 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3043 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3044
3045 for (i = 1; i <= 7; i++)
3046 {
3047 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3048 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3049 }
3050
3051 /* 64kB aligned region used for DCache cleaning */
3052 xscale->cache_clean_address = 0xfffe0000;
3053
3054 xscale->hold_rst = 0;
3055 xscale->external_debug_break = 0;
3056
3057 xscale->force_hw_bkpts = 1;
3058
3059 xscale->ibcr_available = 2;
3060 xscale->ibcr0_used = 0;
3061 xscale->ibcr1_used = 0;
3062
3063 xscale->dbr_available = 2;
3064 xscale->dbr0_used = 0;
3065 xscale->dbr1_used = 0;
3066
3067 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3068 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3069
3070 xscale->vector_catch = 0x1;
3071
3072 xscale->trace.capture_status = TRACE_IDLE;
3073 xscale->trace.data = NULL;
3074 xscale->trace.image = NULL;
3075 xscale->trace.buffer_enabled = 0;
3076 xscale->trace.buffer_fill = 0;
3077
3078 /* prepare ARMv4/5 specific information */
3079 armv4_5->arch_info = xscale;
3080 armv4_5->read_core_reg = xscale_read_core_reg;
3081 armv4_5->write_core_reg = xscale_write_core_reg;
3082 armv4_5->full_context = xscale_full_context;
3083
3084 armv4_5_init_arch_info(target, armv4_5);
3085
3086 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3087 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3088 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3089 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3090 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3091 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3092 xscale->armv4_5_mmu.has_tiny_pages = 1;
3093 xscale->armv4_5_mmu.mmu_enabled = 0;
3094
3095 return ERROR_OK;
3096 }
3097
3098 /* target xscale <endianness> <startup_mode> <chain_pos> <variant> */
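/* Illustrative configuration line only (the startup mode value is an example;
 * consult the documentation of your OpenOCD version for the exact syntax):
 *   target xscale little reset_halt 0 pxa255
 */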
3099 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target)
3100 {
3101 int chain_pos;
3102 char *variant = NULL;
3103 xscale_common_t *xscale = malloc(sizeof(xscale_common_t));
3104 memset(xscale, 0, sizeof(*xscale));
3105
3106 if (argc < 5)
3107 {
3108 LOG_ERROR("'target xscale' requires four arguments: <endianess> <startup_mode> <chain_pos> <variant>");
3109 return ERROR_OK;
3110 }
3111
3112 chain_pos = strtoul(args[3], NULL, 0);
3113
3114 variant = args[4];
3115
3116 xscale_init_arch_info(target, xscale, chain_pos, variant);
3117 xscale_build_reg_cache(target);
3118
3119 return ERROR_OK;
3120 }
3121
3122 int xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3123 {
3124 target_t *target = NULL;
3125 armv4_5_common_t *armv4_5;
3126 xscale_common_t *xscale;
3127
3128 u32 handler_address;
3129
3130 if (argc < 2)
3131 {
3132 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3133 return ERROR_OK;
3134 }
3135
3136 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3137 {
3138 LOG_ERROR("no target '%s' configured", args[0]);
3139 return ERROR_OK;
3140 }
3141
3142 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3143 {
3144 return ERROR_OK;
3145 }
3146
3147 handler_address = strtoul(args[1], NULL, 0);
3148
3149 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3150 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3151 {
3152 xscale->handler_address = handler_address;
3153 }
3154 else
3155 {
3156 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3157 }
3158
3159 return ERROR_OK;
3160 }
3161
3162 int xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3163 {
3164 target_t *target = NULL;
3165 armv4_5_common_t *armv4_5;
3166 xscale_common_t *xscale;
3167
3168 u32 cache_clean_address;
3169
3170 if (argc < 2)
3171 {
3172 LOG_ERROR("'xscale cache_clean_address <target#> <address>' command takes two required operands");
3173 return ERROR_OK;
3174 }
3175
3176 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3177 {
3178 LOG_ERROR("no target '%s' configured", args[0]);
3179 return ERROR_OK;
3180 }
3181
3182 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3183 {
3184 return ERROR_OK;
3185 }
3186
3187 cache_clean_address = strtoul(args[1], NULL, 0);
3188
3189 if (cache_clean_address & 0xffff)
3190 {
3191 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3192 }
3193 else
3194 {
3195 xscale->cache_clean_address = cache_clean_address;
3196 }
3197
3198 return ERROR_OK;
3199 }
3200
3201 int xscale_handle_cache_info_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3202 {
3203 target_t *target = get_current_target(cmd_ctx);
3204 armv4_5_common_t *armv4_5;
3205 xscale_common_t *xscale;
3206
3207 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3208 {
3209 return ERROR_OK;
3210 }
3211
3212 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
3213 }
3214
3215 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical)
3216 {
3217 armv4_5_common_t *armv4_5;
3218 xscale_common_t *xscale;
3219 int retval;
3220 int type;
3221 u32 cb;
3222 int domain;
3223 u32 ap;
3224
3226 if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)
3227 {
3228 return retval;
3229 }
3230 u32 ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3231 if (type == -1)
3232 {
3233 return ret;
3234 }
3235 *physical = ret;
3236 return ERROR_OK;
3237 }
3238
3239 static int xscale_mmu(struct target_s *target, int *enabled)
3240 {
3241 armv4_5_common_t *armv4_5 = target->arch_info;
3242 xscale_common_t *xscale = armv4_5->arch_info;
3243
3244 if (target->state != TARGET_HALTED)
3245 {
3246 LOG_ERROR("Target not halted");
3247 return ERROR_TARGET_INVALID;
3248 }
3249 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3250 return ERROR_OK;
3251 }
3252
3253
3254 int xscale_handle_mmu_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3255 {
3256 target_t *target = get_current_target(cmd_ctx);
3257 armv4_5_common_t *armv4_5;
3258 xscale_common_t *xscale;
3259
3260 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3261 {
3262 return ERROR_OK;
3263 }
3264
3265 if (target->state != TARGET_HALTED)
3266 {
3267 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3268 return ERROR_OK;
3269 }
3270
3271 if (argc >= 1)
3272 {
3273 if (strcmp("enable", args[0]) == 0)
3274 {
3275 xscale_enable_mmu_caches(target, 1, 0, 0);
3276 xscale->armv4_5_mmu.mmu_enabled = 1;
3277 }
3278 else if (strcmp("disable", args[0]) == 0)
3279 {
3280 xscale_disable_mmu_caches(target, 1, 0, 0);
3281 xscale->armv4_5_mmu.mmu_enabled = 0;
3282 }
3283 }
3284
3285 command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3286
3287 return ERROR_OK;
3288 }
3289
3290 int xscale_handle_idcache_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3291 {
3292 target_t *target = get_current_target(cmd_ctx);
3293 armv4_5_common_t *armv4_5;
3294 xscale_common_t *xscale;
3295 int icache = 0, dcache = 0;
3296
3297 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3298 {
3299 return ERROR_OK;
3300 }
3301
3302 if (target->state != TARGET_HALTED)
3303 {
3304 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3305 return ERROR_OK;
3306 }
3307
3308 if (strcmp(cmd, "icache") == 0)
3309 icache = 1;
3310 else if (strcmp(cmd, "dcache") == 0)
3311 dcache = 1;
3312
3313 if (argc >= 1)
3314 {
3315 if (strcmp("enable", args[0]) == 0)
3316 {
3317 xscale_enable_mmu_caches(target, 0, dcache, icache);
3318
3319 if (icache)
3320 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
3321 else if (dcache)
3322 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
3323 }
3324 else if (strcmp("disable", args[0]) == 0)
3325 {
3326 xscale_disable_mmu_caches(target, 0, dcache, icache);
3327
3328 if (icache)
3329 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
3330 else if (dcache)
3331 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
3332 }
3333 }
3334
3335 if (icache)
3336 command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
3337
3338 if (dcache)
3339 command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
3340
3341 return ERROR_OK;
3342 }
3343
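/* 'xscale vector_catch [mask]': each bit of the 8-bit mask enables trapping
 * of one exception vector; the mask is written to DCSR bits 16-23. */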
3344 int xscale_handle_vector_catch_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3345 {
3346 target_t *target = get_current_target(cmd_ctx);
3347 armv4_5_common_t *armv4_5;
3348 xscale_common_t *xscale;
3349
3350 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3351 {
3352 return ERROR_OK;
3353 }
3354
3355 if (argc < 1)
3356 {
3357 command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
3358 }
3359 else
3360 {
3361 xscale->vector_catch = strtoul(args[0], NULL, 0);
3362 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3363 xscale_write_dcsr(target, -1, -1);
3364 }
3365
3366 command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3367
3368 return ERROR_OK;
3369 }
3370
3371 int xscale_handle_force_hw_bkpts_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3372 {
3373 target_t *target = get_current_target(cmd_ctx);
3374 armv4_5_common_t *armv4_5;
3375 xscale_common_t *xscale;
3376
3377 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3378 {
3379 return ERROR_OK;
3380 }
3381
3382 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3383 {
3384 xscale->force_hw_bkpts = 1;
3385 }
3386 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3387 {
3388 xscale->force_hw_bkpts = 0;
3389 }
3390 else
3391 {
3392 command_print(cmd_ctx, "usage: xscale force_hw_bkpts <enable|disable>");
3393 }
3394
3395 command_print(cmd_ctx, "force hardware breakpoints %s", (xscale->force_hw_bkpts) ? "enabled" : "disabled");
3396
3397 return ERROR_OK;
3398 }
3399
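/* 'xscale trace_buffer <enable|disable> ['fill' [n]|'wrap']': enabling in
 * fill mode writes DCSR with trace bit 1 set so capture stops when the
 * buffer is full; wrap mode clears it so the buffer overwrites continuously.
 * Previously collected trace data is discarded when the buffer is enabled. */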
3400 int xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3401 {
3402 target_t *target = get_current_target(cmd_ctx);
3403 armv4_5_common_t *armv4_5;
3404 xscale_common_t *xscale;
3405 u32 dcsr_value;
3406
3407 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3408 {
3409 return ERROR_OK;
3410 }
3411
3412 if (target->state != TARGET_HALTED)
3413 {
3414 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3415 return ERROR_OK;
3416 }
3417
3418 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3419 {
3420 xscale_trace_data_t *td, *next_td;
3421 xscale->trace.buffer_enabled = 1;
3422
3423 /* free old trace data */
3424 td = xscale->trace.data;
3425 while (td)
3426 {
3427 next_td = td->next;
3428
3429 if (td->entries)
3430 free(td->entries);
3431 free(td);
3432 td = next_td;
3433 }
3434 xscale->trace.data = NULL;
3435 }
3436 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3437 {
3438 xscale->trace.buffer_enabled = 0;
3439 }
3440
3441 if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
3442 {
3443 if (argc >= 3)
3444 xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);
3445 else
3446 xscale->trace.buffer_fill = 1;
3447 }
3448 else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
3449 {
3450 xscale->trace.buffer_fill = -1;
3451 }
3452
3453 if (xscale->trace.buffer_enabled)
3454 {
3455 /* if we enable the trace buffer in fill-once
3456 * mode we know the address of the first instruction */
3457 xscale->trace.pc_ok = 1;
3458 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3459 }
3460 else
3461 {
3462 /* otherwise the address is unknown, and we have no known good PC */
3463 xscale->trace.pc_ok = 0;
3464 }
3465
3466 command_print(cmd_ctx, "trace buffer %s (%s)",
3467 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3468 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3469
3470 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3471 if (xscale->trace.buffer_fill >= 0)
3472 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3473 else
3474 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3475
3476 return ERROR_OK;
3477 }
3478
3479 int xscale_handle_trace_image_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3480 {
3481 target_t *target;
3482 armv4_5_common_t *armv4_5;
3483 xscale_common_t *xscale;
3484
3485 if (argc < 1)
3486 {
3487 command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
3488 return ERROR_OK;
3489 }
3490
3491 target = get_current_target(cmd_ctx);
3492
3493 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3494 {
3495 return ERROR_OK;
3496 }
3497
3498 if (xscale->trace.image)
3499 {
3500 image_close(xscale->trace.image);
3501 free(xscale->trace.image);
3502 command_print(cmd_ctx, "previously loaded image found and closed");
3503 }
3504
3505 xscale->trace.image = malloc(sizeof(image_t));
3506 xscale->trace.image->base_address_set = 0;
3507 xscale->trace.image->start_address_set = 0;
3508
3509 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3510 if (argc >= 2)
3511 {
3512 xscale->trace.image->base_address_set = 1;
3513 xscale->trace.image->base_address = strtoul(args[1], NULL, 0);
3514 }
3515 else
3516 {
3517 xscale->trace.image->base_address_set = 0;
3518 }
3519
3520 if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
3521 {
3522 free(xscale->trace.image);
3523 xscale->trace.image = NULL;
3524 return ERROR_OK;
3525 }
3526
3527 return ERROR_OK;
3528 }
3529
3530 int xscale_handle_dump_trace_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3531 {
3532 target_t *target = get_current_target(cmd_ctx);
3533 armv4_5_common_t *armv4_5;
3534 xscale_common_t *xscale;
3535 xscale_trace_data_t *trace_data;
3536 fileio_t file;
3537
3538 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3539 {
3540 return ERROR_OK;
3541 }
3542
3543 if (target->state != TARGET_HALTED)
3544 {
3545 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3546 return ERROR_OK;
3547 }
3548
3549 if (argc < 1)
3550 {
3551 command_print(cmd_ctx, "usage: xscale dump_trace <file>");
3552 return ERROR_OK;
3553 }
3554
3555 trace_data = xscale->trace.data;
3556
3557 if (!trace_data)
3558 {
3559 command_print(cmd_ctx, "no trace data collected");
3560 return ERROR_OK;
3561 }
3562
3563 if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3564 {
3565 return ERROR_OK;
3566 }
3567
3568 while (trace_data)
3569 {
3570 int i;
3571
3572 fileio_write_u32(&file, trace_data->chkpt0);
3573 fileio_write_u32(&file, trace_data->chkpt1);
3574 fileio_write_u32(&file, trace_data->last_instruction);
3575 fileio_write_u32(&file, trace_data->depth);
3576
3577 for (i = 0; i < trace_data->depth; i++)
3578 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3579
3580 trace_data = trace_data->next;
3581 }
3582
3583 fileio_close(&file);
3584
3585 return ERROR_OK;
3586 }
3587
3588 int xscale_handle_analyze_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3589 {
3590 target_t *target = get_current_target(cmd_ctx);
3591 armv4_5_common_t *armv4_5;
3592 xscale_common_t *xscale;
3593
3594 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3595 {
3596 return ERROR_OK;
3597 }
3598
3599 xscale_analyze_trace(target, cmd_ctx);
3600
3601 return ERROR_OK;
3602 }
3603
3604 int xscale_handle_cp15(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3605 {
3606 target_t *target = get_current_target(cmd_ctx);
3607 armv4_5_common_t *armv4_5;
3608 xscale_common_t *xscale;
3609
3610 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3611 {
3612 return ERROR_OK;
3613 }
3614
3615 if (target->state != TARGET_HALTED)
3616 {
3617 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3618 return ERROR_OK;
3619 }
3620 u32 reg_no = 0;
3621 reg_t *reg = NULL;
3622 if(argc > 0)
3623 {
3624 reg_no = strtoul(args[0], NULL, 0);
3625 /*translate from xscale cp15 register no to openocd register*/
3626 switch(reg_no)
3627 {
3628 case 0:
3629 reg_no = XSCALE_MAINID;
3630 break;
3631 case 1:
3632 reg_no = XSCALE_CTRL;
3633 break;
3634 case 2:
3635 reg_no = XSCALE_TTB;
3636 break;
3637 case 3:
3638 reg_no = XSCALE_DAC;
3639 break;
3640 case 5:
3641 reg_no = XSCALE_FSR;
3642 break;
3643 case 6:
3644 reg_no = XSCALE_FAR;
3645 break;
3646 case 13:
3647 reg_no = XSCALE_PID;
3648 break;
3649 case 15:
3650 reg_no = XSCALE_CPACCESS;
3651 break;
3652 default:
3653 command_print(cmd_ctx, "invalid register number");
3654 return ERROR_INVALID_ARGUMENTS;
3655 }
3656 reg = &xscale->reg_cache->reg_list[reg_no];
3657
3658 }
3659 if(argc == 1)
3660 {
3661 u32 value;
3662
3663 /* read cp15 control register */
3664 xscale_get_reg(reg);
3665 value = buf_get_u32(reg->value, 0, 32);
3666 command_print(cmd_ctx, "%s (/%i): 0x%x", reg->name, reg->size, value);
3667 }
3668 else if(argc == 2)
3669 {
3670
3671 u32 value = strtoul(args[1], NULL, 0);
3672
3673 /* send CP write request (command 0x41) */
3674 xscale_send_u32(target, 0x41);
3675
3676 /* send CP register number */
3677 xscale_send_u32(target, reg_no);
3678
3679 /* send CP register value */
3680 xscale_send_u32(target, value);
3681
3682 /* execute cpwait to ensure outstanding operations complete */
3683 xscale_send_u32(target, 0x53);
3684 }
3685 else
3686 {
3687 command_print(cmd_ctx, "usage: cp15 [register]<, [value]>");
3688 }
3689
3690 return ERROR_OK;
3691 }
3692
3693 int xscale_register_commands(struct command_context_s *cmd_ctx)
3694 {
3695 command_t *xscale_cmd;
3696
3697 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3698
3699 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
3700 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3701
3702 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3703 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3704 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3705 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3706
3707 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_idcache_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
3708
3709 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable|disable> ['fill' [n]|'wrap']");
3710
3711 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3712 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3713 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3714 COMMAND_EXEC, "load image from <file> [base address]");
3715
3716 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3717
3718 armv4_5_register_commands(cmd_ctx);
3719
3720 return ERROR_OK;
3721 }
