1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
19 ***************************************************************************/
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "replacements.h"
25
26 #include "xscale.h"
27
28 #include "register.h"
29 #include "target.h"
30 #include "armv4_5.h"
31 #include "arm_simulator.h"
32 #include "arm_disassembler.h"
33 #include "log.h"
34 #include "jtag.h"
35 #include "binarybuffer.h"
36 #include "time_support.h"
37 #include "breakpoints.h"
38 #include "fileio.h"
39
40 #include <stdlib.h>
41 #include <string.h>
42
43 #include <sys/types.h>
44 #include <unistd.h>
45 #include <errno.h>
46
47
48 /* cli handling */
49 int xscale_register_commands(struct command_context_s *cmd_ctx);
50
51 /* forward declarations */
52 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target);
53 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target);
54 int xscale_quit();
55
56 int xscale_arch_state(struct target_s *target);
57 int xscale_poll(target_t *target);
58 int xscale_halt(target_t *target);
59 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution);
60 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints);
61 int xscale_debug_entry(target_t *target);
62 int xscale_restore_context(target_t *target);
63
64 int xscale_assert_reset(target_t *target);
65 int xscale_deassert_reset(target_t *target);
66 int xscale_soft_reset_halt(struct target_s *target);
67
68 int xscale_set_reg_u32(reg_t *reg, u32 value);
69
70 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode);
71 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value);
72
73 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
74 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
75 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer);
76 int xscale_checksum_memory(struct target_s *target, u32 address, u32 count, u32* checksum);
77
78 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
79 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
80 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
81 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
82 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
83 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
84 void xscale_enable_watchpoints(struct target_s *target);
85 void xscale_enable_breakpoints(struct target_s *target);
86 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical);
87 static int xscale_mmu(struct target_s *target, int *enabled);
88
89 int xscale_read_trace(target_t *target);
90
91 target_type_t xscale_target =
92 {
93 .name = "xscale",
94
95 .poll = xscale_poll,
96 .arch_state = xscale_arch_state,
97
98 .target_request_data = NULL,
99
100 .halt = xscale_halt,
101 .resume = xscale_resume,
102 .step = xscale_step,
103
104 .assert_reset = xscale_assert_reset,
105 .deassert_reset = xscale_deassert_reset,
106 .soft_reset_halt = xscale_soft_reset_halt,
107
108 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
109
110 .read_memory = xscale_read_memory,
111 .write_memory = xscale_write_memory,
112 .bulk_write_memory = xscale_bulk_write_memory,
113 .checksum_memory = xscale_checksum_memory,
114
115 .run_algorithm = armv4_5_run_algorithm,
116
117 .add_breakpoint = xscale_add_breakpoint,
118 .remove_breakpoint = xscale_remove_breakpoint,
119 .add_watchpoint = xscale_add_watchpoint,
120 .remove_watchpoint = xscale_remove_watchpoint,
121
122 .register_commands = xscale_register_commands,
123 .target_command = xscale_target_command,
124 .init_target = xscale_init_target,
125 .quit = xscale_quit,
126
127 .virt2phys = xscale_virt2phys,
128 .mmu = xscale_mmu
129 };
130
131 char* xscale_reg_list[] =
132 {
133 "XSCALE_MAINID", /* 0 */
134 "XSCALE_CACHETYPE",
135 "XSCALE_CTRL",
136 "XSCALE_AUXCTRL",
137 "XSCALE_TTB",
138 "XSCALE_DAC",
139 "XSCALE_FSR",
140 "XSCALE_FAR",
141 "XSCALE_PID",
142 "XSCALE_CPACCESS",
143 "XSCALE_IBCR0", /* 10 */
144 "XSCALE_IBCR1",
145 "XSCALE_DBR0",
146 "XSCALE_DBR1",
147 "XSCALE_DBCON",
148 "XSCALE_TBREG",
149 "XSCALE_CHKPT0",
150 "XSCALE_CHKPT1",
151 "XSCALE_DCSR",
152 "XSCALE_TX",
153 "XSCALE_RX", /* 20 */
154 "XSCALE_TXRXCTRL",
155 };
156
157 xscale_reg_t xscale_reg_arch_info[] =
158 {
159 {XSCALE_MAINID, NULL},
160 {XSCALE_CACHETYPE, NULL},
161 {XSCALE_CTRL, NULL},
162 {XSCALE_AUXCTRL, NULL},
163 {XSCALE_TTB, NULL},
164 {XSCALE_DAC, NULL},
165 {XSCALE_FSR, NULL},
166 {XSCALE_FAR, NULL},
167 {XSCALE_PID, NULL},
168 {XSCALE_CPACCESS, NULL},
169 {XSCALE_IBCR0, NULL},
170 {XSCALE_IBCR1, NULL},
171 {XSCALE_DBR0, NULL},
172 {XSCALE_DBR1, NULL},
173 {XSCALE_DBCON, NULL},
174 {XSCALE_TBREG, NULL},
175 {XSCALE_CHKPT0, NULL},
176 {XSCALE_CHKPT1, NULL},
177 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
178 {-1, NULL}, /* TX accessed via JTAG */
179 {-1, NULL}, /* RX accessed via JTAG */
180 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
181 };
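/* Note: the order of xscale_reg_arch_info[] (and of xscale_reg_list[] above) must
 * match the XSCALE_* register indices used throughout this file. Entries marked -1
 * apparently have no debug-handler register number and are only reachable through
 * their dedicated JTAG data registers, as the comments above indicate. */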
182
183 int xscale_reg_arch_type = -1;
184
185 int xscale_get_reg(reg_t *reg);
186 int xscale_set_reg(reg_t *reg, u8 *buf);
187
188 int xscale_get_arch_pointers(target_t *target, armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
189 {
190 armv4_5_common_t *armv4_5 = target->arch_info;
191 xscale_common_t *xscale = armv4_5->arch_info;
192
193 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
194 {
195 LOG_ERROR("target isn't an XScale target");
196 return -1;
197 }
198
199 if (xscale->common_magic != XSCALE_COMMON_MAGIC)
200 {
201 LOG_ERROR("target isn't an XScale target");
202 return -1;
203 }
204
205 *armv4_5_p = armv4_5;
206 *xscale_p = xscale;
207
208 return ERROR_OK;
209 }
210
211 int xscale_jtag_set_instr(int chain_pos, u32 new_instr)
212 {
213 jtag_device_t *device = jtag_get_device(chain_pos);
214
215 if (buf_get_u32(device->cur_instr, 0, device->ir_length) != new_instr)
216 {
217 scan_field_t field;
218
219 field.device = chain_pos;
220 field.num_bits = device->ir_length;
221 field.out_value = calloc(CEIL(field.num_bits, 8), 1);
222 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
223 field.out_mask = NULL;
224 field.in_value = NULL;
225 jtag_set_check_value(&field, device->expected, device->expected_mask, NULL);
226
227 jtag_add_ir_scan(1, &field, -1);
228
229 free(field.out_value);
230 }
231
232 return ERROR_OK;
233 }
234
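/* The DCSR JTAG data register scanned below consists of three fields: a 3-bit
 * control field (bit 1 = hold_rst, bit 2 = external debug break, as set by
 * buf_set_u32() below), the 32-bit DCSR itself, and a single trailing bit. The
 * DCSR value is captured in a first scan and then written straight back in a
 * second scan, so the control bits can be updated without disturbing the
 * register content. */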
235 int xscale_read_dcsr(target_t *target)
236 {
237 armv4_5_common_t *armv4_5 = target->arch_info;
238 xscale_common_t *xscale = armv4_5->arch_info;
239
240 int retval;
241
242 scan_field_t fields[3];
243 u8 field0 = 0x0;
244 u8 field0_check_value = 0x2;
245 u8 field0_check_mask = 0x7;
246 u8 field2 = 0x0;
247 u8 field2_check_value = 0x0;
248 u8 field2_check_mask = 0x1;
249
250 jtag_add_end_state(TAP_PD);
251 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
252
253 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
254 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
255
256 fields[0].device = xscale->jtag_info.chain_pos;
257 fields[0].num_bits = 3;
258 fields[0].out_value = &field0;
259 fields[0].out_mask = NULL;
260 fields[0].in_value = NULL;
261 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
262
263 fields[1].device = xscale->jtag_info.chain_pos;
264 fields[1].num_bits = 32;
265 fields[1].out_value = NULL;
266 fields[1].out_mask = NULL;
267 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
268 fields[1].in_handler = NULL;
269 fields[1].in_handler_priv = NULL;
270 fields[1].in_check_value = NULL;
271 fields[1].in_check_mask = NULL;
272
273 fields[2].device = xscale->jtag_info.chain_pos;
274 fields[2].num_bits = 1;
275 fields[2].out_value = &field2;
276 fields[2].out_mask = NULL;
277 fields[2].in_value = NULL;
278 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
279
280 jtag_add_dr_scan(3, fields, -1);
281
282 if ((retval = jtag_execute_queue()) != ERROR_OK)
283 {
284 LOG_ERROR("JTAG error while reading DCSR");
285 return retval;
286 }
287
288 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
289 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
290
291 /* write the register with the value we just read
 292          * on this second pass, only the first bit of field0 is guaranteed to be 0
293 */
294 field0_check_mask = 0x1;
295 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
296 fields[1].in_value = NULL;
297
298 jtag_add_end_state(TAP_RTI);
299
300 jtag_add_dr_scan(3, fields, -1);
301
 302          /* DANGER!!! this must be here. It makes sure that the arguments
 303           * to jtag_set_check_value() do not go out of scope before the queue is executed! */
304 return jtag_execute_queue();
305 }
306
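/* Read num_words 32-bit words from the debug handler through the DBGTX JTAG data
 * register. Every scan returns a 3-bit status field plus 32 data bits; bit 0 of
 * the status indicates whether the handler had actually placed a new word in TX.
 * Words flagged as invalid are squeezed out of the result arrays and the remaining
 * reads are retried until all requested words have been collected (or until 1000
 * empty polls have gone by). */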
307 int xscale_receive(target_t *target, u32 *buffer, int num_words)
308 {
309 if (num_words==0)
310 return ERROR_INVALID_ARGUMENTS;
311
312 int retval=ERROR_OK;
313 armv4_5_common_t *armv4_5 = target->arch_info;
314 xscale_common_t *xscale = armv4_5->arch_info;
315
316 enum tap_state path[3];
317 scan_field_t fields[3];
318
319 u8 *field0 = malloc(num_words * 1);
320 u8 field0_check_value = 0x2;
321 u8 field0_check_mask = 0x6;
322 u32 *field1 = malloc(num_words * 4);
323 u8 field2_check_value = 0x0;
324 u8 field2_check_mask = 0x1;
325 int words_done = 0;
326 int words_scheduled = 0;
327
328 int i;
329
330 path[0] = TAP_SDS;
331 path[1] = TAP_CD;
332 path[2] = TAP_SD;
333
334 fields[0].device = xscale->jtag_info.chain_pos;
335 fields[0].num_bits = 3;
336 fields[0].out_value = NULL;
337 fields[0].out_mask = NULL;
338 fields[0].in_value = NULL;
339 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
340
341 fields[1].device = xscale->jtag_info.chain_pos;
342 fields[1].num_bits = 32;
343 fields[1].out_value = NULL;
344 fields[1].out_mask = NULL;
345 fields[1].in_value = NULL;
346 fields[1].in_handler = NULL;
347 fields[1].in_handler_priv = NULL;
348 fields[1].in_check_value = NULL;
349 fields[1].in_check_mask = NULL;
350
351
352
353 fields[2].device = xscale->jtag_info.chain_pos;
354 fields[2].num_bits = 1;
355 fields[2].out_value = NULL;
356 fields[2].out_mask = NULL;
357 fields[2].in_value = NULL;
358 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
359
360 jtag_add_end_state(TAP_RTI);
361 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
362 jtag_add_runtest(1, -1); /* ensures that we're in the TAP_RTI state as the above could be a no-op */
363
364 /* repeat until all words have been collected */
365 int attempts=0;
366 while (words_done < num_words)
367 {
368 /* schedule reads */
369 words_scheduled = 0;
370 for (i = words_done; i < num_words; i++)
371 {
372 fields[0].in_value = &field0[i];
373 fields[1].in_handler = buf_to_u32_handler;
374 fields[1].in_handler_priv = (u8*)&field1[i];
375
376 jtag_add_pathmove(3, path);
377 jtag_add_dr_scan(3, fields, TAP_RTI);
378 words_scheduled++;
379 }
380
381 if ((retval = jtag_execute_queue()) != ERROR_OK)
382 {
383 LOG_ERROR("JTAG error while receiving data from debug handler");
384 break;
385 }
386
387 /* examine results */
388 for (i = words_done; i < num_words; i++)
389 {
 390                          if (!(field0[i] & 1))
391 {
392 /* move backwards if necessary */
393 int j;
394 for (j = i; j < num_words - 1; j++)
395 {
396 field0[j] = field0[j+1];
397 field1[j] = field1[j+1];
398 }
399 words_scheduled--;
400 }
401 }
402 if (words_scheduled==0)
403 {
404 if (attempts++==1000)
405 {
406 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
407 retval=ERROR_TARGET_TIMEOUT;
408 break;
409 }
410 }
411
412 words_done += words_scheduled;
413 }
414
415 for (i = 0; i < num_words; i++)
416 *(buffer++) = buf_get_u32((u8*)&field1[i], 0, 32);
417
 418          free(field1);
         free(field0);
419
420 return retval;
421 }
422
423 int xscale_read_tx(target_t *target, int consume)
424 {
425 armv4_5_common_t *armv4_5 = target->arch_info;
426 xscale_common_t *xscale = armv4_5->arch_info;
427 enum tap_state path[3];
428 enum tap_state noconsume_path[6];
429
430 int retval;
431 struct timeval timeout, now;
432
433 scan_field_t fields[3];
434 u8 field0_in = 0x0;
435 u8 field0_check_value = 0x2;
436 u8 field0_check_mask = 0x6;
437 u8 field2_check_value = 0x0;
438 u8 field2_check_mask = 0x1;
439
440 jtag_add_end_state(TAP_RTI);
441
442 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
443
444 path[0] = TAP_SDS;
445 path[1] = TAP_CD;
446 path[2] = TAP_SD;
447
448 noconsume_path[0] = TAP_SDS;
449 noconsume_path[1] = TAP_CD;
450 noconsume_path[2] = TAP_E1D;
451 noconsume_path[3] = TAP_PD;
452 noconsume_path[4] = TAP_E2D;
453 noconsume_path[5] = TAP_SD;
454
455 fields[0].device = xscale->jtag_info.chain_pos;
456 fields[0].num_bits = 3;
457 fields[0].out_value = NULL;
458 fields[0].out_mask = NULL;
459 fields[0].in_value = &field0_in;
460 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
461
462 fields[1].device = xscale->jtag_info.chain_pos;
463 fields[1].num_bits = 32;
464 fields[1].out_value = NULL;
465 fields[1].out_mask = NULL;
466 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
467 fields[1].in_handler = NULL;
468 fields[1].in_handler_priv = NULL;
469 fields[1].in_check_value = NULL;
470 fields[1].in_check_mask = NULL;
471
472
473
474 fields[2].device = xscale->jtag_info.chain_pos;
475 fields[2].num_bits = 1;
476 fields[2].out_value = NULL;
477 fields[2].out_mask = NULL;
478 fields[2].in_value = NULL;
479 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
480
481 gettimeofday(&timeout, NULL);
482 timeval_add_time(&timeout, 1, 0);
483
484 for (;;)
485 {
486 int i;
487 for (i=0; i<100; i++)
488 {
489 /* if we want to consume the register content (i.e. clear TX_READY),
490 * we have to go straight from Capture-DR to Shift-DR
491 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
492 */
493 if (consume)
494 jtag_add_pathmove(3, path);
495 else
496 {
497 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
498 }
499
500 jtag_add_dr_scan(3, fields, TAP_RTI);
501
502 if ((retval = jtag_execute_queue()) != ERROR_OK)
503 {
504 LOG_ERROR("JTAG error while reading TX");
505 return ERROR_TARGET_TIMEOUT;
506 }
507
508 gettimeofday(&now, NULL);
509 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
510 {
511 LOG_ERROR("time out reading TX register");
512 return ERROR_TARGET_TIMEOUT;
513 }
514 if (!((!(field0_in & 1)) && consume))
515 {
516 goto done;
517 }
518 }
519 LOG_DEBUG("waiting 10ms");
520 usleep(10*1000); /* avoid flooding the logs */
521 }
522 done:
523
524 if (!(field0_in & 1))
525 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
526
527 return ERROR_OK;
528 }
529
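/* Write the cached RX register value to the debug handler through the DBGRX JTAG
 * data register. The 3-bit status field read back during each scan carries the
 * handshake: the scan is repeated until bit 0 reads 0 (the previous word has been
 * consumed), after which a final scan with the trailing valid bit set to 1 marks
 * the new RX word as valid for the handler. */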
530 int xscale_write_rx(target_t *target)
531 {
532 armv4_5_common_t *armv4_5 = target->arch_info;
533 xscale_common_t *xscale = armv4_5->arch_info;
534
535 int retval;
536 struct timeval timeout, now;
537
538 scan_field_t fields[3];
539 u8 field0_out = 0x0;
540 u8 field0_in = 0x0;
541 u8 field0_check_value = 0x2;
542 u8 field0_check_mask = 0x6;
543 u8 field2 = 0x0;
544 u8 field2_check_value = 0x0;
545 u8 field2_check_mask = 0x1;
546
547 jtag_add_end_state(TAP_RTI);
548
549 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
550
551 fields[0].device = xscale->jtag_info.chain_pos;
552 fields[0].num_bits = 3;
553 fields[0].out_value = &field0_out;
554 fields[0].out_mask = NULL;
555 fields[0].in_value = &field0_in;
556 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
557
558 fields[1].device = xscale->jtag_info.chain_pos;
559 fields[1].num_bits = 32;
560 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
561 fields[1].out_mask = NULL;
562 fields[1].in_value = NULL;
563 fields[1].in_handler = NULL;
564 fields[1].in_handler_priv = NULL;
565 fields[1].in_check_value = NULL;
566 fields[1].in_check_mask = NULL;
567
568
569
570 fields[2].device = xscale->jtag_info.chain_pos;
571 fields[2].num_bits = 1;
572 fields[2].out_value = &field2;
573 fields[2].out_mask = NULL;
574 fields[2].in_value = NULL;
575 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
576
577 gettimeofday(&timeout, NULL);
578 timeval_add_time(&timeout, 1, 0);
579
580 /* poll until rx_read is low */
581 LOG_DEBUG("polling RX");
582 for (;;)
583 {
584 int i;
585 for (i=0; i<10; i++)
586 {
587 jtag_add_dr_scan(3, fields, TAP_RTI);
588
589 if ((retval = jtag_execute_queue()) != ERROR_OK)
590 {
591 LOG_ERROR("JTAG error while writing RX");
592 return retval;
593 }
594
595 gettimeofday(&now, NULL);
596 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
597 {
598 LOG_ERROR("time out writing RX register");
599 return ERROR_TARGET_TIMEOUT;
600 }
601 if (!(field0_in & 1))
602 goto done;
603 }
604 LOG_DEBUG("waiting 10ms");
605 usleep(10*1000); /* wait 10ms to avoid flooding the logs */
606 }
607 done:
608
609 /* set rx_valid */
610 field2 = 0x1;
611 jtag_add_dr_scan(3, fields, TAP_RTI);
612
613 if ((retval = jtag_execute_queue()) != ERROR_OK)
614 {
615 LOG_ERROR("JTAG error while writing RX");
616 return retval;
617 }
618
619 return ERROR_OK;
620 }
621
 622  /* send count elements of size bytes each to the debug handler */
623 int xscale_send(target_t *target, u8 *buffer, int count, int size)
624 {
625 armv4_5_common_t *armv4_5 = target->arch_info;
626 xscale_common_t *xscale = armv4_5->arch_info;
627 u32 t[3];
628 int bits[3];
629
630 int retval;
631
632 int done_count = 0;
633
634 jtag_add_end_state(TAP_RTI);
635
636 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
637
638 bits[0]=3;
639 t[0]=0;
640 bits[1]=32;
641 t[2]=1;
642 bits[2]=1;
643 int endianness = target->endianness;
644 while (done_count++ < count)
645 {
646 switch (size)
647 {
648 case 4:
649 if (endianness == TARGET_LITTLE_ENDIAN)
650 {
651 t[1]=le_to_h_u32(buffer);
652 } else
653 {
654 t[1]=be_to_h_u32(buffer);
655 }
656 break;
657 case 2:
658 if (endianness == TARGET_LITTLE_ENDIAN)
659 {
660 t[1]=le_to_h_u16(buffer);
661 } else
662 {
663 t[1]=be_to_h_u16(buffer);
664 }
665 break;
666 case 1:
667 t[1]=buffer[0];
668 break;
669 default:
670 LOG_ERROR("BUG: size neither 4, 2 nor 1");
671 exit(-1);
672 }
673 jtag_add_dr_out(xscale->jtag_info.chain_pos,
674 3,
675 bits,
676 t,
677 TAP_RTI);
678 buffer += size;
679 }
680
681 if ((retval = jtag_execute_queue()) != ERROR_OK)
682 {
683 LOG_ERROR("JTAG error while sending data to debug handler");
684 return retval;
685 }
686
687 return ERROR_OK;
688 }
689
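/* Word values pushed through xscale_send_u32() are commands and operands for the
 * on-chip debug handler. The command bytes used in this file are: 0x30/0x31 resume
 * (0x31 with trace buffer enabled), 0x62 clean trace buffer, 0x10|size memory
 * read, 0x20|size memory write, 0x60 clear the sticky abort bit, 0x0 read banked
 * registers and 0x1 write banked registers. */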
690 int xscale_send_u32(target_t *target, u32 value)
691 {
692 armv4_5_common_t *armv4_5 = target->arch_info;
693 xscale_common_t *xscale = armv4_5->arch_info;
694
695 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
696 return xscale_write_rx(target);
697 }
698
699 int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
700 {
701 armv4_5_common_t *armv4_5 = target->arch_info;
702 xscale_common_t *xscale = armv4_5->arch_info;
703
704 int retval;
705
706 scan_field_t fields[3];
707 u8 field0 = 0x0;
708 u8 field0_check_value = 0x2;
709 u8 field0_check_mask = 0x7;
710 u8 field2 = 0x0;
711 u8 field2_check_value = 0x0;
712 u8 field2_check_mask = 0x1;
713
714 if (hold_rst != -1)
715 xscale->hold_rst = hold_rst;
716
717 if (ext_dbg_brk != -1)
718 xscale->external_debug_break = ext_dbg_brk;
719
720 jtag_add_end_state(TAP_RTI);
721 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
722
723 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
724 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
725
726 fields[0].device = xscale->jtag_info.chain_pos;
727 fields[0].num_bits = 3;
728 fields[0].out_value = &field0;
729 fields[0].out_mask = NULL;
730 fields[0].in_value = NULL;
731 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
732
733 fields[1].device = xscale->jtag_info.chain_pos;
734 fields[1].num_bits = 32;
735 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
736 fields[1].out_mask = NULL;
737 fields[1].in_value = NULL;
738 fields[1].in_handler = NULL;
739 fields[1].in_handler_priv = NULL;
740 fields[1].in_check_value = NULL;
741 fields[1].in_check_mask = NULL;
742
743
744
745 fields[2].device = xscale->jtag_info.chain_pos;
746 fields[2].num_bits = 1;
747 fields[2].out_value = &field2;
748 fields[2].out_mask = NULL;
749 fields[2].in_value = NULL;
750 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
751
752 jtag_add_dr_scan(3, fields, -1);
753
754 if ((retval = jtag_execute_queue()) != ERROR_OK)
755 {
756 LOG_ERROR("JTAG error while writing DCSR");
757 return retval;
758 }
759
760 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
761 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
762
763 return ERROR_OK;
764 }
765
 766  /* parity of the number of set bits in a 32-bit word: 0 if even, 1 if odd */
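/* The XORs below fold all 32 bits into bits [3:0]; 0x6996 (binary 0110 1001 1001
 * 0110) then acts as a 16-entry lookup table whose bit n holds the parity of the
 * 4-bit value n. Example: v = 0xA (two bits set) selects bit 10 of 0x6996, which
 * is 0, i.e. even parity. */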
767 unsigned int parity (unsigned int v)
768 {
769 unsigned int ov = v;
770 v ^= v >> 16;
771 v ^= v >> 8;
772 v ^= v >> 4;
773 v &= 0xf;
774 LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
775 return (0x6996 >> v) & 1;
776 }
777
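/* Download one 32-byte line into the (mini) instruction cache through the LDIC
 * JTAG data register: a first scan carries a 6-bit command (b011 = mini IC,
 * b010 = main IC, see below) plus the upper 27 bits of the virtual address
 * (va >> 5, since cache lines are 32 bytes), followed by eight scans of one
 * 32-bit data word plus its parity bit each. */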
778 int xscale_load_ic(target_t *target, int mini, u32 va, u32 buffer[8])
779 {
780 armv4_5_common_t *armv4_5 = target->arch_info;
781 xscale_common_t *xscale = armv4_5->arch_info;
782 u8 packet[4];
783 u8 cmd;
784 int word;
785
786 scan_field_t fields[2];
787
788 LOG_DEBUG("loading miniIC at 0x%8.8x", va);
789
790 jtag_add_end_state(TAP_RTI);
791 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
792
793 /* CMD is b010 for Main IC and b011 for Mini IC */
794 if (mini)
795 buf_set_u32(&cmd, 0, 3, 0x3);
796 else
797 buf_set_u32(&cmd, 0, 3, 0x2);
798
799 buf_set_u32(&cmd, 3, 3, 0x0);
800
801 /* virtual address of desired cache line */
802 buf_set_u32(packet, 0, 27, va >> 5);
803
804 fields[0].device = xscale->jtag_info.chain_pos;
805 fields[0].num_bits = 6;
806 fields[0].out_value = &cmd;
807 fields[0].out_mask = NULL;
808 fields[0].in_value = NULL;
809 fields[0].in_check_value = NULL;
810 fields[0].in_check_mask = NULL;
811 fields[0].in_handler = NULL;
812 fields[0].in_handler_priv = NULL;
813
814 fields[1].device = xscale->jtag_info.chain_pos;
815 fields[1].num_bits = 27;
816 fields[1].out_value = packet;
817 fields[1].out_mask = NULL;
818 fields[1].in_value = NULL;
819 fields[1].in_check_value = NULL;
820 fields[1].in_check_mask = NULL;
821 fields[1].in_handler = NULL;
822 fields[1].in_handler_priv = NULL;
823
824 jtag_add_dr_scan(2, fields, -1);
825
826 fields[0].num_bits = 32;
827 fields[0].out_value = packet;
828
829 fields[1].num_bits = 1;
830 fields[1].out_value = &cmd;
831
832 for (word = 0; word < 8; word++)
833 {
834 buf_set_u32(packet, 0, 32, buffer[word]);
835 cmd = parity(*((u32*)packet));
836 jtag_add_dr_scan(2, fields, -1);
837 }
838
839 jtag_execute_queue();
840
841 return ERROR_OK;
842 }
843
844 int xscale_invalidate_ic_line(target_t *target, u32 va)
845 {
846 armv4_5_common_t *armv4_5 = target->arch_info;
847 xscale_common_t *xscale = armv4_5->arch_info;
848 u8 packet[4];
849 u8 cmd;
850
851 scan_field_t fields[2];
852
853 jtag_add_end_state(TAP_RTI);
854 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
855
856 /* CMD for invalidate IC line b000, bits [6:4] b000 */
857 buf_set_u32(&cmd, 0, 6, 0x0);
858
859 /* virtual address of desired cache line */
860 buf_set_u32(packet, 0, 27, va >> 5);
861
862 fields[0].device = xscale->jtag_info.chain_pos;
863 fields[0].num_bits = 6;
864 fields[0].out_value = &cmd;
865 fields[0].out_mask = NULL;
866 fields[0].in_value = NULL;
867 fields[0].in_check_value = NULL;
868 fields[0].in_check_mask = NULL;
869 fields[0].in_handler = NULL;
870 fields[0].in_handler_priv = NULL;
871
872 fields[1].device = xscale->jtag_info.chain_pos;
873 fields[1].num_bits = 27;
874 fields[1].out_value = packet;
875 fields[1].out_mask = NULL;
876 fields[1].in_value = NULL;
877 fields[1].in_check_value = NULL;
878 fields[1].in_check_mask = NULL;
879 fields[1].in_handler = NULL;
880 fields[1].in_handler_priv = NULL;
881
882 jtag_add_dr_scan(2, fields, -1);
883
884 return ERROR_OK;
885 }
886
887 int xscale_update_vectors(target_t *target)
888 {
889 armv4_5_common_t *armv4_5 = target->arch_info;
890 xscale_common_t *xscale = armv4_5->arch_info;
891 int i;
892 int retval;
893
894 u32 low_reset_branch, high_reset_branch;
895
896 for (i = 1; i < 8; i++)
897 {
898 /* if there's a static vector specified for this exception, override */
899 if (xscale->static_high_vectors_set & (1 << i))
900 {
901 xscale->high_vectors[i] = xscale->static_high_vectors[i];
902 }
903 else
904 {
905 retval=target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
906 if (retval == ERROR_TARGET_TIMEOUT)
907 return retval;
908 if (retval!=ERROR_OK)
909 {
910 /* Some of these reads will fail as part of normal execution */
911 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
912 }
913 }
914 }
915
916 for (i = 1; i < 8; i++)
917 {
918 if (xscale->static_low_vectors_set & (1 << i))
919 {
920 xscale->low_vectors[i] = xscale->static_low_vectors[i];
921 }
922 else
923 {
924 retval=target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
925 if (retval == ERROR_TARGET_TIMEOUT)
926 return retval;
927 if (retval!=ERROR_OK)
928 {
929 /* Some of these reads will fail as part of normal execution */
930 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
931 }
932 }
933 }
934
935 /* calculate branches to debug handler */
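 /* The vector slots are filled with ARM B instructions that jump into the debug
  * handler's reset entry at handler_address + 0x20. A B offset is encoded as
  * (destination - vector_address - 8) >> 2, because the PC reads as the
  * instruction address + 8 and the offset counts words, hence the two
  * expressions below. */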
936 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
937 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
938
939 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
940 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
941
942 /* invalidate and load exception vectors in mini i-cache */
943 xscale_invalidate_ic_line(target, 0x0);
944 xscale_invalidate_ic_line(target, 0xffff0000);
945
946 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
947 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
948
949 return ERROR_OK;
950 }
951
952 int xscale_arch_state(struct target_s *target)
953 {
954 armv4_5_common_t *armv4_5 = target->arch_info;
955 xscale_common_t *xscale = armv4_5->arch_info;
956
957 char *state[] =
958 {
959 "disabled", "enabled"
960 };
961
962 char *arch_dbg_reason[] =
963 {
964 "", "\n(processor reset)", "\n(trace buffer full)"
965 };
966
967 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
968 {
969 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
970 exit(-1);
971 }
972
973 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
974 "cpsr: 0x%8.8x pc: 0x%8.8x\n"
975 "MMU: %s, D-Cache: %s, I-Cache: %s"
976 "%s",
977 armv4_5_state_strings[armv4_5->core_state],
978 target_debug_reason_strings[target->debug_reason],
979 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
980 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
981 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
982 state[xscale->armv4_5_mmu.mmu_enabled],
983 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
984 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
985 arch_dbg_reason[xscale->arch_debug_reason]);
986
987 return ERROR_OK;
988 }
989
990 int xscale_poll(target_t *target)
991 {
992 int retval=ERROR_OK;
993 armv4_5_common_t *armv4_5 = target->arch_info;
994 xscale_common_t *xscale = armv4_5->arch_info;
995
996 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
997 {
998 enum target_state previous_state = target->state;
999 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
1000 {
1001
1002 /* there's data to read from the tx register, we entered debug state */
1003 xscale->handler_running = 1;
1004
1005 target->state = TARGET_HALTED;
1006
1007 /* process debug entry, fetching current mode regs */
1008 retval = xscale_debug_entry(target);
1009 }
1010 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1011 {
1012 LOG_USER("error while polling TX register, reset CPU");
1013                          /* here we "lie" so GDB won't get stuck and a reset can be performed */
1014 target->state = TARGET_HALTED;
1015 }
1016
1017 /* debug_entry could have overwritten target state (i.e. immediate resume)
1018 * don't signal event handlers in that case
1019 */
1020 if (target->state != TARGET_HALTED)
1021 return ERROR_OK;
1022
1023 /* if target was running, signal that we halted
1024 * otherwise we reentered from debug execution */
1025 if (previous_state == TARGET_RUNNING)
1026 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1027 else
1028 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
1029 }
1030
1031 return retval;
1032 }
1033
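/* Called once the debug handler has signalled debug entry. The handler first
 * sends r0, pc, r1-r7 and the cpsr (10 words) through TX, then r8-r14 plus the
 * spsr (or just r8-r14 in USR/SYS mode). Afterwards the DCSR is read and its
 * method-of-entry field (bits [4:2]) is used to classify the debug reason and to
 * apply the appropriate PC fixup. */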
1034 int xscale_debug_entry(target_t *target)
1035 {
1036 armv4_5_common_t *armv4_5 = target->arch_info;
1037 xscale_common_t *xscale = armv4_5->arch_info;
1038 u32 pc;
1039 u32 buffer[10];
1040 int i;
1041 int retval;
1042
1043 u32 moe;
1044
1045 /* clear external dbg break (will be written on next DCSR read) */
1046 xscale->external_debug_break = 0;
1047 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
1048 return retval;
1049
1050 /* get r0, pc, r1 to r7 and cpsr */
1051 if ((retval=xscale_receive(target, buffer, 10))!=ERROR_OK)
1052 return retval;
1053
1054 /* move r0 from buffer to register cache */
1055 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
1056          armv4_5->core_cache->reg_list[0].dirty = 1;
1057          armv4_5->core_cache->reg_list[0].valid = 1;
1058 LOG_DEBUG("r0: 0x%8.8x", buffer[0]);
1059
1060 /* move pc from buffer to register cache */
1061 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
1062 armv4_5->core_cache->reg_list[15].dirty = 1;
1063 armv4_5->core_cache->reg_list[15].valid = 1;
1064 LOG_DEBUG("pc: 0x%8.8x", buffer[1]);
1065
1066 /* move data from buffer to register cache */
1067 for (i = 1; i <= 7; i++)
1068 {
1069 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
1070 armv4_5->core_cache->reg_list[i].dirty = 1;
1071 armv4_5->core_cache->reg_list[i].valid = 1;
1072 LOG_DEBUG("r%i: 0x%8.8x", i, buffer[i + 1]);
1073 }
1074
1075 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
1076 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
1077 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
1078 LOG_DEBUG("cpsr: 0x%8.8x", buffer[9]);
1079
1080 armv4_5->core_mode = buffer[9] & 0x1f;
1081 if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
1082 {
1083 target->state = TARGET_UNKNOWN;
1084 LOG_ERROR("cpsr contains invalid mode value - communication failure");
1085 return ERROR_TARGET_FAILURE;
1086 }
1087 LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
1088
1089 if (buffer[9] & 0x20)
1090 armv4_5->core_state = ARMV4_5_STATE_THUMB;
1091 else
1092 armv4_5->core_state = ARMV4_5_STATE_ARM;
1093
1094 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1095 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
1096 {
1097 xscale_receive(target, buffer, 8);
1098 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1099 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
1100 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
1101 }
1102 else
1103 {
1104 /* r8 to r14, but no spsr */
1105 xscale_receive(target, buffer, 7);
1106 }
1107
1108 /* move data from buffer to register cache */
1109 for (i = 8; i <= 14; i++)
1110 {
1111 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
1112 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
1113 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
1114 }
1115
1116 /* examine debug reason */
1117 xscale_read_dcsr(target);
1118 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
1119
1120 /* stored PC (for calculating fixup) */
1121 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1122
1123 switch (moe)
1124 {
1125 case 0x0: /* Processor reset */
1126 target->debug_reason = DBG_REASON_DBGRQ;
1127 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1128 pc -= 4;
1129 break;
1130 case 0x1: /* Instruction breakpoint hit */
1131 target->debug_reason = DBG_REASON_BREAKPOINT;
1132 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1133 pc -= 4;
1134 break;
1135 case 0x2: /* Data breakpoint hit */
1136 target->debug_reason = DBG_REASON_WATCHPOINT;
1137 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1138 pc -= 4;
1139 break;
1140 case 0x3: /* BKPT instruction executed */
1141 target->debug_reason = DBG_REASON_BREAKPOINT;
1142 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1143 pc -= 4;
1144 break;
1145 case 0x4: /* Ext. debug event */
1146 target->debug_reason = DBG_REASON_DBGRQ;
1147 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1148 pc -= 4;
1149 break;
1150                  case 0x5: /* Vector trap occurred */
1151 target->debug_reason = DBG_REASON_BREAKPOINT;
1152 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1153 pc -= 4;
1154 break;
1155 case 0x6: /* Trace buffer full break */
1156 target->debug_reason = DBG_REASON_DBGRQ;
1157 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1158 pc -= 4;
1159 break;
1160 case 0x7: /* Reserved */
1161 default:
1162 LOG_ERROR("Method of Entry is 'Reserved'");
1163 exit(-1);
1164 break;
1165 }
1166
1167 /* apply PC fixup */
1168 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1169
1170 /* on the first debug entry, identify cache type */
1171 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1172 {
1173 u32 cache_type_reg;
1174
1175 /* read cp15 cache type register */
1176 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1177 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1178
1179 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1180 }
1181
1182 /* examine MMU and Cache settings */
1183 /* read cp15 control register */
1184 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1185 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1186 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1187 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1188 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1189
1190 /* tracing enabled, read collected trace data */
1191 if (xscale->trace.buffer_enabled)
1192 {
1193 xscale_read_trace(target);
1194 xscale->trace.buffer_fill--;
1195
1196 /* resume if we're still collecting trace data */
1197 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1198 && (xscale->trace.buffer_fill > 0))
1199 {
1200 xscale_resume(target, 1, 0x0, 1, 0);
1201 }
1202 else
1203 {
1204 xscale->trace.buffer_enabled = 0;
1205 }
1206 }
1207
1208 return ERROR_OK;
1209 }
1210
1211 int xscale_halt(target_t *target)
1212 {
1213 armv4_5_common_t *armv4_5 = target->arch_info;
1214 xscale_common_t *xscale = armv4_5->arch_info;
1215
1216 LOG_DEBUG("target->state: %s", target_state_strings[target->state]);
1217
1218 if (target->state == TARGET_HALTED)
1219 {
1220 LOG_DEBUG("target was already halted");
1221 return ERROR_OK;
1222 }
1223 else if (target->state == TARGET_UNKNOWN)
1224 {
1225                  /* this must not happen for an XScale target */
1226 LOG_ERROR("target was in unknown state when halt was requested");
1227 return ERROR_TARGET_INVALID;
1228 }
1229 else if (target->state == TARGET_RESET)
1230 {
1231 LOG_DEBUG("target->state == TARGET_RESET");
1232 }
1233 else
1234 {
1235 /* assert external dbg break */
1236 xscale->external_debug_break = 1;
1237 xscale_read_dcsr(target);
1238
1239 target->debug_reason = DBG_REASON_DBGRQ;
1240 }
1241
1242 return ERROR_OK;
1243 }
1244
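/* XScale has no hardware single-step, so stepping is emulated: the PC of the next
 * instruction is computed with arm_simulate_step() and programmed into the IBCR0
 * instruction breakpoint register as a one-shot breakpoint (bit 0, set below,
 * enables the breakpoint). */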
1245 int xscale_enable_single_step(struct target_s *target, u32 next_pc)
1246 {
1247 armv4_5_common_t *armv4_5 = target->arch_info;
1248 xscale_common_t *xscale= armv4_5->arch_info;
1249 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1250
1251 if (xscale->ibcr0_used)
1252 {
1253 breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1254
1255 if (ibcr0_bp)
1256 {
1257 xscale_unset_breakpoint(target, ibcr0_bp);
1258 }
1259 else
1260 {
1261 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1262 exit(-1);
1263 }
1264 }
1265
1266 xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1267
1268 return ERROR_OK;
1269 }
1270
1271 int xscale_disable_single_step(struct target_s *target)
1272 {
1273 armv4_5_common_t *armv4_5 = target->arch_info;
1274 xscale_common_t *xscale= armv4_5->arch_info;
1275 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1276
1277 xscale_set_reg_u32(ibcr0, 0x0);
1278
1279 return ERROR_OK;
1280 }
1281
1282 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution)
1283 {
1284 armv4_5_common_t *armv4_5 = target->arch_info;
1285 xscale_common_t *xscale= armv4_5->arch_info;
1286 breakpoint_t *breakpoint = target->breakpoints;
1287
1288 u32 current_pc;
1289
1290 int retval;
1291 int i;
1292
1293 LOG_DEBUG("-");
1294
1295 if (target->state != TARGET_HALTED)
1296 {
1297 LOG_WARNING("target not halted");
1298 return ERROR_TARGET_NOT_HALTED;
1299 }
1300
1301 if (!debug_execution)
1302 {
1303 target_free_all_working_areas(target);
1304 }
1305
1306 /* update vector tables */
1307 if ((retval=xscale_update_vectors(target))!=ERROR_OK)
1308 return retval;
1309
1310 /* current = 1: continue on current pc, otherwise continue at <address> */
1311 if (!current)
1312 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1313
1314 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1315
1316 /* if we're at the reset vector, we have to simulate the branch */
1317 if (current_pc == 0x0)
1318 {
1319 arm_simulate_step(target, NULL);
1320 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1321 }
1322
1323 /* the front-end may request us not to handle breakpoints */
1324 if (handle_breakpoints)
1325 {
1326 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1327 {
1328 u32 next_pc;
1329
1330 /* there's a breakpoint at the current PC, we have to step over it */
1331 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1332 xscale_unset_breakpoint(target, breakpoint);
1333
1334 /* calculate PC of next instruction */
1335 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1336 {
1337 u32 current_opcode;
1338 target_read_u32(target, current_pc, &current_opcode);
1339 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1340 }
1341
1342 LOG_DEBUG("enable single-step");
1343 xscale_enable_single_step(target, next_pc);
1344
1345 /* restore banked registers */
1346 xscale_restore_context(target);
1347
1348 /* send resume request (command 0x30 or 0x31)
1349 * clean the trace buffer if it is to be enabled (0x62) */
1350 if (xscale->trace.buffer_enabled)
1351 {
1352 xscale_send_u32(target, 0x62);
1353 xscale_send_u32(target, 0x31);
1354 }
1355 else
1356 xscale_send_u32(target, 0x30);
1357
1358 /* send CPSR */
1359 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1360 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1361
1362 for (i = 7; i >= 0; i--)
1363 {
1364 /* send register */
1365 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1366 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1367 }
1368
1369 /* send PC */
1370 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1371 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1372
1373 /* wait for and process debug entry */
1374 xscale_debug_entry(target);
1375
1376 LOG_DEBUG("disable single-step");
1377 xscale_disable_single_step(target);
1378
1379 LOG_DEBUG("set breakpoint at 0x%8.8x", breakpoint->address);
1380 xscale_set_breakpoint(target, breakpoint);
1381 }
1382 }
1383
1384 /* enable any pending breakpoints and watchpoints */
1385 xscale_enable_breakpoints(target);
1386 xscale_enable_watchpoints(target);
1387
1388 /* restore banked registers */
1389 xscale_restore_context(target);
1390
1391 /* send resume request (command 0x30 or 0x31)
1392 * clean the trace buffer if it is to be enabled (0x62) */
1393 if (xscale->trace.buffer_enabled)
1394 {
1395 xscale_send_u32(target, 0x62);
1396 xscale_send_u32(target, 0x31);
1397 }
1398 else
1399 xscale_send_u32(target, 0x30);
1400
1401 /* send CPSR */
1402 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1403 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1404
1405 for (i = 7; i >= 0; i--)
1406 {
1407 /* send register */
1408 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1409 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1410 }
1411
1412 /* send PC */
1413 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1414 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1415
1416 target->debug_reason = DBG_REASON_NOTHALTED;
1417
1418 if (!debug_execution)
1419 {
1420 /* registers are now invalid */
1421 armv4_5_invalidate_core_regs(target);
1422 target->state = TARGET_RUNNING;
1423 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1424 }
1425 else
1426 {
1427 target->state = TARGET_DEBUG_RUNNING;
1428 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1429 }
1430
1431 LOG_DEBUG("target resumed");
1432
1433 xscale->handler_running = 1;
1434
1435 return ERROR_OK;
1436 }
1437
1438 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints)
1439 {
1440 armv4_5_common_t *armv4_5 = target->arch_info;
1441 xscale_common_t *xscale = armv4_5->arch_info;
1442 breakpoint_t *breakpoint = target->breakpoints;
1443
1444 u32 current_pc, next_pc;
1445 int i;
1446 int retval;
1447
1448 if (target->state != TARGET_HALTED)
1449 {
1450 LOG_WARNING("target not halted");
1451 return ERROR_TARGET_NOT_HALTED;
1452 }
1453
1454 /* current = 1: continue on current pc, otherwise continue at <address> */
1455 if (!current)
1456 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1457
1458 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1459
1460 /* if we're at the reset vector, we have to simulate the step */
1461 if (current_pc == 0x0)
1462 {
1463 arm_simulate_step(target, NULL);
1464 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1465
1466 target->debug_reason = DBG_REASON_SINGLESTEP;
1467 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1468
1469 return ERROR_OK;
1470 }
1471
1472 /* the front-end may request us not to handle breakpoints */
1473 if (handle_breakpoints)
1474 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1475 {
1476 xscale_unset_breakpoint(target, breakpoint);
1477 }
1478
1479 target->debug_reason = DBG_REASON_SINGLESTEP;
1480
1481 /* calculate PC of next instruction */
1482 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1483 {
1484 u32 current_opcode;
1485 target_read_u32(target, current_pc, &current_opcode);
1486 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1487 }
1488
1489 LOG_DEBUG("enable single-step");
1490 xscale_enable_single_step(target, next_pc);
1491
1492 /* restore banked registers */
1493 xscale_restore_context(target);
1494
1495 /* send resume request (command 0x30 or 0x31)
1496 * clean the trace buffer if it is to be enabled (0x62) */
1497 if (xscale->trace.buffer_enabled)
1498 {
1499 xscale_send_u32(target, 0x62);
1500 xscale_send_u32(target, 0x31);
1501 }
1502 else
1503 xscale_send_u32(target, 0x30);
1504
1505 /* send CPSR */
1506 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1507 LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1508
1509 for (i = 7; i >= 0; i--)
1510 {
1511 /* send register */
1512 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1513 LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1514 }
1515
1516 /* send PC */
1517 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1518 LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1519
1520 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1521
1522 /* registers are now invalid */
1523 armv4_5_invalidate_core_regs(target);
1524
1525 /* wait for and process debug entry */
1526 xscale_debug_entry(target);
1527
1528 LOG_DEBUG("disable single-step");
1529 xscale_disable_single_step(target);
1530
1531 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1532
1533 if (breakpoint)
1534 {
1535 xscale_set_breakpoint(target, breakpoint);
1536 }
1537
1538 LOG_DEBUG("target stepped");
1539
1540 return ERROR_OK;
1541
1542 }
1543
1544 int xscale_assert_reset(target_t *target)
1545 {
1546 armv4_5_common_t *armv4_5 = target->arch_info;
1547 xscale_common_t *xscale = armv4_5->arch_info;
1548
1549 LOG_DEBUG("target->state: %s", target_state_strings[target->state]);
1550
1551 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1552           * end up in T-L-R, which would reset JTAG)
1553           */
1554 jtag_add_end_state(TAP_RTI);
1555 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
1556
1557 /* set Hold reset, Halt mode and Trap Reset */
1558 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1559 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1560 xscale_write_dcsr(target, 1, 0);
1561
1562 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1563 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, 0x7f);
1564 jtag_execute_queue();
1565
1566 /* assert reset */
1567 jtag_add_reset(0, 1);
1568
1569 /* sleep 1ms, to be sure we fulfill any requirements */
1570 jtag_add_sleep(1000);
1571 jtag_execute_queue();
1572
1573 target->state = TARGET_RESET;
1574
1575 return ERROR_OK;
1576 }
1577
1578 int xscale_deassert_reset(target_t *target)
1579 {
1580 armv4_5_common_t *armv4_5 = target->arch_info;
1581 xscale_common_t *xscale = armv4_5->arch_info;
1582
1583 fileio_t debug_handler;
1584 u32 address;
1585 u32 binary_size;
1586
1587 u32 buf_cnt;
1588 int i;
1589 int retval;
1590
1591 breakpoint_t *breakpoint = target->breakpoints;
1592
1593 LOG_DEBUG("-");
1594
1595 xscale->ibcr_available = 2;
1596 xscale->ibcr0_used = 0;
1597 xscale->ibcr1_used = 0;
1598
1599 xscale->dbr_available = 2;
1600 xscale->dbr0_used = 0;
1601 xscale->dbr1_used = 0;
1602
1603 /* mark all hardware breakpoints as unset */
1604 while (breakpoint)
1605 {
1606 if (breakpoint->type == BKPT_HARD)
1607 {
1608 breakpoint->set = 0;
1609 }
1610 breakpoint = breakpoint->next;
1611 }
1612
1613 if (!xscale->handler_installed)
1614 {
1615 /* release SRST */
1616 jtag_add_reset(0, 0);
1617
1618 /* wait 300ms; 150 and 100ms were not enough */
1619 jtag_add_sleep(300*1000);
1620
1621 jtag_add_runtest(2030, TAP_RTI);
1622 jtag_execute_queue();
1623
1624 /* set Hold reset, Halt mode and Trap Reset */
1625 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1626 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1627 xscale_write_dcsr(target, 1, 0);
1628
1629 /* Load debug handler */
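                 /* The handler binary is read in 32-byte chunks, converted from
                  * little-endian to host-endian words, padded with 0xe1a08008
                  * (mov r8, r8, effectively a nop) where the last chunk is short,
                  * and written into the mini instruction cache line by line. Lines
                  * whose address is a multiple of 0x400 (the reset-vector lines,
                  * per the comment below) are skipped; the vectors are loaded
                  * separately afterwards. */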
1630 if (fileio_open(&debug_handler, "xscale/debug_handler.bin", FILEIO_READ, FILEIO_BINARY) != ERROR_OK)
1631 {
1632 return ERROR_OK;
1633 }
1634
1635 if ((binary_size = debug_handler.size) % 4)
1636 {
1637 LOG_ERROR("debug_handler.bin: size not a multiple of 4");
1638 exit(-1);
1639 }
1640
1641 if (binary_size > 0x800)
1642 {
1643 LOG_ERROR("debug_handler.bin: larger than 2kb");
1644 exit(-1);
1645 }
1646
1647 binary_size = CEIL(binary_size, 32) * 32;
1648
1649 address = xscale->handler_address;
1650 while (binary_size > 0)
1651 {
1652 u32 cache_line[8];
1653 u8 buffer[32];
1654
1655                          if ((retval = fileio_read(&debug_handler, 32, buffer, &buf_cnt)) != ERROR_OK)
1656                          {
1657                                  LOG_ERROR("reading debug handler failed");
                                  fileio_close(&debug_handler);
                                  return retval;
1658                          }
1659
1660 for (i = 0; i < buf_cnt; i += 4)
1661 {
1662 /* convert LE buffer to host-endian u32 */
1663 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1664 }
1665
1666 for (; buf_cnt < 32; buf_cnt += 4)
1667 {
1668 cache_line[buf_cnt / 4] = 0xe1a08008;
1669 }
1670
1671 /* only load addresses other than the reset vectors */
1672 if ((address % 0x400) != 0x0)
1673 {
1674 xscale_load_ic(target, 1, address, cache_line);
1675 }
1676
1677 address += buf_cnt;
1678 binary_size -= buf_cnt;
1679                  }
1680
1681 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
1682 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
1683
1684 jtag_add_runtest(30, TAP_RTI);
1685
1686 jtag_add_sleep(100000);
1687
1688 /* set Hold reset, Halt mode and Trap Reset */
1689 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1690 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1691 xscale_write_dcsr(target, 1, 0);
1692
1693 /* clear Hold reset to let the target run (should enter debug handler) */
1694 xscale_write_dcsr(target, 0, 1);
1695 target->state = TARGET_RUNNING;
1696
1697 if ((target->reset_mode != RESET_HALT) && (target->reset_mode != RESET_INIT))
1698 {
1699 jtag_add_sleep(10000);
1700
1701 /* we should have entered debug now */
1702 xscale_debug_entry(target);
1703 target->state = TARGET_HALTED;
1704
1705 /* resume the target */
1706 xscale_resume(target, 1, 0x0, 1, 0);
1707 }
1708
1709 fileio_close(&debug_handler);
1710 }
1711 else
1712 {
1713 jtag_add_reset(0, 0);
1714 }
1715
1716
1717 return ERROR_OK;
1718 }
1719
1720 int xscale_soft_reset_halt(struct target_s *target)
1721 {
1722
1723 return ERROR_OK;
1724 }
1725
1726 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode)
1727 {
1728
1729 return ERROR_OK;
1730 }
1731
1732 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value)
1733 {
1734
1735 return ERROR_OK;
1736 }
1737
1738 int xscale_full_context(target_t *target)
1739 {
1740 armv4_5_common_t *armv4_5 = target->arch_info;
1741
1742 u32 *buffer;
1743
1744 int i, j;
1745
1746 LOG_DEBUG("-");
1747
1748 if (target->state != TARGET_HALTED)
1749 {
1750 LOG_WARNING("target not halted");
1751 return ERROR_TARGET_NOT_HALTED;
1752 }
1753
1754 buffer = malloc(4 * 8);
1755
1756 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1757 * we can't enter User mode on an XScale (unpredictable),
1758 * but User shares registers with SYS
1759 */
1760 for(i = 1; i < 7; i++)
1761 {
1762 int valid = 1;
1763
1764 /* check if there are invalid registers in the current mode
1765 */
1766 for (j = 0; j <= 16; j++)
1767 {
1768 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1769 valid = 0;
1770 }
1771
1772 if (!valid)
1773 {
1774 u32 tmp_cpsr;
1775
1776 /* request banked registers */
1777 xscale_send_u32(target, 0x0);
1778
1779 tmp_cpsr = 0x0;
1780 tmp_cpsr |= armv4_5_number_to_mode(i);
1781 tmp_cpsr |= 0xc0; /* I/F bits */
1782
1783 /* send CPSR for desired mode */
1784 xscale_send_u32(target, tmp_cpsr);
1785
1786 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1787 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1788 {
1789 xscale_receive(target, buffer, 8);
1790                                  buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
1791 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1792 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1793 }
1794 else
1795 {
1796 xscale_receive(target, buffer, 7);
1797 }
1798
1799 /* move data from buffer to register cache */
1800 for (j = 8; j <= 14; j++)
1801 {
1802 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1803 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1804 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1805 }
1806 }
1807 }
1808
1809 free(buffer);
1810
1811 return ERROR_OK;
1812 }
1813
1814 int xscale_restore_context(target_t *target)
1815 {
1816 armv4_5_common_t *armv4_5 = target->arch_info;
1817
1818 int i, j;
1819
1820 LOG_DEBUG("-");
1821
1822 if (target->state != TARGET_HALTED)
1823 {
1824 LOG_WARNING("target not halted");
1825 return ERROR_TARGET_NOT_HALTED;
1826 }
1827
1828 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1829 * we can't enter User mode on an XScale (unpredictable),
1830 * but User shares registers with SYS
1831 */
1832 for(i = 1; i < 7; i++)
1833 {
1834 int dirty = 0;
1835
1836                  /* check if there are dirty registers in this mode
1837                   */
1838 for (j = 8; j <= 14; j++)
1839 {
1840 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1841 dirty = 1;
1842 }
1843
1844 /* if not USR/SYS, check if the SPSR needs to be written */
1845 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1846 {
1847 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1848 dirty = 1;
1849 }
1850
1851 if (dirty)
1852 {
1853 u32 tmp_cpsr;
1854
1855 /* send banked registers */
1856 xscale_send_u32(target, 0x1);
1857
1858 tmp_cpsr = 0x0;
1859 tmp_cpsr |= armv4_5_number_to_mode(i);
1860 tmp_cpsr |= 0xc0; /* I/F bits */
1861
1862 /* send CPSR for desired mode */
1863 xscale_send_u32(target, tmp_cpsr);
1864
1865 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1866 for (j = 8; j <= 14; j++)
1867 {
1868                                  xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
1869 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1870 }
1871
1872 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1873 {
1874                                  xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
1875 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1876 }
1877 }
1878 }
1879
1880 return ERROR_OK;
1881 }
1882
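/* Memory reads go through the debug handler: command 0x10|size is followed by the
 * base address and the element count, and the handler replies with count 32-bit
 * words (one element per word regardless of access size). Afterwards the DCSR is
 * checked for the sticky abort bit (bit 5) to detect data aborts, which is then
 * cleared with command 0x60. */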
1883 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1884 {
1885 armv4_5_common_t *armv4_5 = target->arch_info;
1886 xscale_common_t *xscale = armv4_5->arch_info;
1887 u32 *buf32;
1888 int i;
1889 int retval;
1890
1891 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1892
1893 if (target->state != TARGET_HALTED)
1894 {
1895 LOG_WARNING("target not halted");
1896 return ERROR_TARGET_NOT_HALTED;
1897 }
1898
1899 /* sanitize arguments */
1900 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1901 return ERROR_INVALID_ARGUMENTS;
1902
1903 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1904 return ERROR_TARGET_UNALIGNED_ACCESS;
1905
1906 /* send memory read request (command 0x1n, n: access size) */
1907 if ((retval=xscale_send_u32(target, 0x10 | size))!=ERROR_OK)
1908 return retval;
1909
1910 /* send base address for read request */
1911 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
1912 return retval;
1913
1914 /* send number of requested data words */
1915 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
1916 return retval;
1917
1918 /* receive data from target (count times 32-bit words in host endianness) */
1919 buf32 = malloc(4 * count);
1920 if ((retval=xscale_receive(target, buf32, count))!=ERROR_OK)
1921 { free(buf32); return retval; } /* don't leak the receive buffer on error */
1922
1923 /* extract data from host-endian buffer into byte stream */
1924 for (i = 0; i < count; i++)
1925 {
1926 switch (size)
1927 {
1928 case 4:
1929 target_buffer_set_u32(target, buffer, buf32[i]);
1930 buffer += 4;
1931 break;
1932 case 2:
1933 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1934 buffer += 2;
1935 break;
1936 case 1:
1937 *buffer++ = buf32[i] & 0xff;
1938 break;
1939 default:
1940 LOG_ERROR("should never get here");
1941 exit(-1);
1942 }
1943 }
1944
1945 free(buf32);
1946
1947 /* examine DCSR, to see if Sticky Abort (SA) got set */
1948 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
1949 return retval;
1950 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1951 {
1952 /* clear SA bit */
1953 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
1954 return retval;
1955
1956 return ERROR_TARGET_DATA_ABORT;
1957 }
1958
1959 return ERROR_OK;
1960 }
1961
1962 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1963 {
1964 armv4_5_common_t *armv4_5 = target->arch_info;
1965 xscale_common_t *xscale = armv4_5->arch_info;
1966 int retval;
1967
1968 LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1969
1970 if (target->state != TARGET_HALTED)
1971 {
1972 LOG_WARNING("target not halted");
1973 return ERROR_TARGET_NOT_HALTED;
1974 }
1975
1976 /* sanitize arguments */
1977 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1978 return ERROR_INVALID_ARGUMENTS;
1979
1980 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1981 return ERROR_TARGET_UNALIGNED_ACCESS;
1982
1983 /* send memory write request (command 0x2n, n: access size) */
1984 if ((retval=xscale_send_u32(target, 0x20 | size))!=ERROR_OK)
1985 return retval;
1986
1987 /* send base address for write request */
1988 if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
1989 return retval;
1990
1991 /* send number of requested data words to be written*/
1992 if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
1993 return retval;
1994
1995 /* extract data from host-endian buffer into byte stream */
1996 #if 0
1997 for (i = 0; i < count; i++)
1998 {
1999 switch (size)
2000 {
2001 case 4:
2002 value = target_buffer_get_u32(target, buffer);
2003 xscale_send_u32(target, value);
2004 buffer += 4;
2005 break;
2006 case 2:
2007 value = target_buffer_get_u16(target, buffer);
2008 xscale_send_u32(target, value);
2009 buffer += 2;
2010 break;
2011 case 1:
2012 value = *buffer;
2013 xscale_send_u32(target, value);
2014 buffer += 1;
2015 break;
2016 default:
2017 LOG_ERROR("should never get here");
2018 exit(-1);
2019 }
2020 }
2021 #endif
2022 if ((retval=xscale_send(target, buffer, count, size))!=ERROR_OK)
2023 return retval;
2024
2025 /* examine DCSR, to see if Sticky Abort (SA) got set */
2026 if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
2027 return retval;
2028 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2029 {
2030 /* clear SA bit */
2031 if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
2032 return retval;
2033
2034 return ERROR_TARGET_DATA_ABORT;
2035 }
2036
2037 return ERROR_OK;
2038 }
2039
2040 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer)
2041 {
2042 return xscale_write_memory(target, address, 4, count, buffer);
2043 }
2044
2045 int xscale_checksum_memory(struct target_s *target, u32 address, u32 count, u32* checksum)
2046 {
2047 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2048 }
2049
2050 u32 xscale_get_ttb(target_t *target)
2051 {
2052 armv4_5_common_t *armv4_5 = target->arch_info;
2053 xscale_common_t *xscale = armv4_5->arch_info;
2054 u32 ttb;
2055
2056 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2057 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2058
2059 return ttb;
2060 }
2061
2062 void xscale_disable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2063 {
2064 armv4_5_common_t *armv4_5 = target->arch_info;
2065 xscale_common_t *xscale = armv4_5->arch_info;
2066 u32 cp15_control;
2067
2068 /* read cp15 control register */
2069 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2070 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2071
2072 if (mmu)
2073 cp15_control &= ~0x1U;
2074
2075 if (d_u_cache)
2076 {
2077 /* clean DCache */
2078 xscale_send_u32(target, 0x50);
2079 xscale_send_u32(target, xscale->cache_clean_address);
2080
2081 /* invalidate DCache */
2082 xscale_send_u32(target, 0x51);
2083
2084 cp15_control &= ~0x4U;
2085 }
2086
2087 if (i_cache)
2088 {
2089 /* invalidate ICache */
2090 xscale_send_u32(target, 0x52);
2091 cp15_control &= ~0x1000U;
2092 }
2093
2094 /* write new cp15 control register */
2095 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2096
2097 /* execute cpwait to ensure outstanding operations complete */
2098 xscale_send_u32(target, 0x53);
2099 }
2100
2101 void xscale_enable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2102 {
2103 armv4_5_common_t *armv4_5 = target->arch_info;
2104 xscale_common_t *xscale = armv4_5->arch_info;
2105 u32 cp15_control;
2106
2107 /* read cp15 control register */
2108 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2109 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2110
2111 if (mmu)
2112 cp15_control |= 0x1U;
2113
2114 if (d_u_cache)
2115 cp15_control |= 0x4U;
2116
2117 if (i_cache)
2118 cp15_control |= 0x1000U;
2119
2120 /* write new cp15 control register */
2121 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2122
2123 /* execute cpwait to ensure outstanding operations complete */
2124 xscale_send_u32(target, 0x53);
2125 }
2126
2127 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2128 {
2129 armv4_5_common_t *armv4_5 = target->arch_info;
2130 xscale_common_t *xscale = armv4_5->arch_info;
2131
2132 if (target->state != TARGET_HALTED)
2133 {
2134 LOG_WARNING("target not halted");
2135 return ERROR_TARGET_NOT_HALTED;
2136 }
2137
2138 if (xscale->force_hw_bkpts)
2139 breakpoint->type = BKPT_HARD;
2140
2141 if (breakpoint->set)
2142 {
2143 LOG_WARNING("breakpoint already set");
2144 return ERROR_OK;
2145 }
2146
2147 if (breakpoint->type == BKPT_HARD)
2148 {
2149 u32 value = breakpoint->address | 1;
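/* IBCRx layout as used here: bits [31:1] hold the breakpoint address and
 * bit 0 enables the comparator (cleared again in xscale_unset_breakpoint) */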
2150 if (!xscale->ibcr0_used)
2151 {
2152 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2153 xscale->ibcr0_used = 1;
2154 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2155 }
2156 else if (!xscale->ibcr1_used)
2157 {
2158 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2159 xscale->ibcr1_used = 1;
2160 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2161 }
2162 else
2163 {
2164 LOG_ERROR("BUG: no hardware comparator available");
2165 return ERROR_OK;
2166 }
2167 }
2168 else if (breakpoint->type == BKPT_SOFT)
2169 {
2170 if (breakpoint->length == 4)
2171 {
2172 /* keep the original instruction in target endianness */
2173 target->type->read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2174 /* write the bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
2175 target_write_u32(target, breakpoint->address, xscale->arm_bkpt);
2176 }
2177 else
2178 {
2179 /* keep the original instruction in target endianness */
2180 target->type->read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2181 /* write the bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
2182 target_write_u16(target, breakpoint->address, xscale->thumb_bkpt);
2183 }
2184 breakpoint->set = 1;
2185 }
2186
2187 return ERROR_OK;
2188
2189 }
2190
2191 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2192 {
2193 armv4_5_common_t *armv4_5 = target->arch_info;
2194 xscale_common_t *xscale = armv4_5->arch_info;
2195
2196 if (target->state != TARGET_HALTED)
2197 {
2198 LOG_WARNING("target not halted");
2199 return ERROR_TARGET_NOT_HALTED;
2200 }
2201
2202 if (xscale->force_hw_bkpts)
2203 {
2204 LOG_DEBUG("forcing use of hardware breakpoint at address 0x%8.8x", breakpoint->address);
2205 breakpoint->type = BKPT_HARD;
2206 }
2207
2208 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2209 {
2210 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2211 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2212 }
2213 else if (breakpoint->type == BKPT_HARD)
2214 {
2215 xscale->ibcr_available--;
2216 }
2217
2218 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2219 {
2220 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length are supported");
2221 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2222 }
2223
2224 return ERROR_OK;
2225 }
2226
2227 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2228 {
2229 armv4_5_common_t *armv4_5 = target->arch_info;
2230 xscale_common_t *xscale = armv4_5->arch_info;
2231
2232 if (target->state != TARGET_HALTED)
2233 {
2234 LOG_WARNING("target not halted");
2235 return ERROR_TARGET_NOT_HALTED;
2236 }
2237
2238 if (!breakpoint->set)
2239 {
2240 LOG_WARNING("breakpoint not set");
2241 return ERROR_OK;
2242 }
2243
2244 if (breakpoint->type == BKPT_HARD)
2245 {
2246 if (breakpoint->set == 1)
2247 {
2248 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2249 xscale->ibcr0_used = 0;
2250 }
2251 else if (breakpoint->set == 2)
2252 {
2253 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2254 xscale->ibcr1_used = 0;
2255 }
2256 breakpoint->set = 0;
2257 }
2258 else
2259 {
2260 /* restore original instruction (kept in target endianness) */
2261 if (breakpoint->length == 4)
2262 {
2263 target->type->write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2264 }
2265 else
2266 {
2267 target->type->write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2268 }
2269 breakpoint->set = 0;
2270 }
2271
2272 return ERROR_OK;
2273 }
2274
2275 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2276 {
2277 armv4_5_common_t *armv4_5 = target->arch_info;
2278 xscale_common_t *xscale = armv4_5->arch_info;
2279
2280 if (target->state != TARGET_HALTED)
2281 {
2282 LOG_WARNING("target not halted");
2283 return ERROR_TARGET_NOT_HALTED;
2284 }
2285
2286 if (breakpoint->set)
2287 {
2288 xscale_unset_breakpoint(target, breakpoint);
2289 }
2290
2291 if (breakpoint->type == BKPT_HARD)
2292 xscale->ibcr_available++;
2293
2294 return ERROR_OK;
2295 }
2296
2297 int xscale_set_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2298 {
2299 armv4_5_common_t *armv4_5 = target->arch_info;
2300 xscale_common_t *xscale = armv4_5->arch_info;
2301 u8 enable=0;
2302 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2303 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2304
2305 if (target->state != TARGET_HALTED)
2306 {
2307 LOG_WARNING("target not halted");
2308 return ERROR_TARGET_NOT_HALTED;
2309 }
2310
2311 xscale_get_reg(dbcon);
2312
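/* the switch below picks a two-bit mode for the trapped access type;
 * DBCON bits [1:0] control DBR0 and bits [3:2] control DBR1, hence the
 * plain OR for DBR0 and the "enable << 2" for DBR1 further down */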
2313 switch (watchpoint->rw)
2314 {
2315 case WPT_READ:
2316 enable = 0x3;
2317 break;
2318 case WPT_ACCESS:
2319 enable = 0x2;
2320 break;
2321 case WPT_WRITE:
2322 enable = 0x1;
2323 break;
2324 default:
2325 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2326 }
2327
2328 if (!xscale->dbr0_used)
2329 {
2330 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2331 dbcon_value |= enable;
2332 xscale_set_reg_u32(dbcon, dbcon_value);
2333 watchpoint->set = 1;
2334 xscale->dbr0_used = 1;
2335 }
2336 else if (!xscale->dbr1_used)
2337 {
2338 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2339 dbcon_value |= enable << 2;
2340 xscale_set_reg_u32(dbcon, dbcon_value);
2341 watchpoint->set = 2;
2342 xscale->dbr1_used = 1;
2343 }
2344 else
2345 {
2346 LOG_ERROR("BUG: no hardware comparator available");
2347 return ERROR_OK;
2348 }
2349
2350 return ERROR_OK;
2351 }
2352
2353 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2354 {
2355 armv4_5_common_t *armv4_5 = target->arch_info;
2356 xscale_common_t *xscale = armv4_5->arch_info;
2357
2358 if (target->state != TARGET_HALTED)
2359 {
2360 LOG_WARNING("target not halted");
2361 return ERROR_TARGET_NOT_HALTED;
2362 }
2363
2364 if (xscale->dbr_available < 1)
2365 {
2366 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2367 }
2368
2369 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2370 {
2371 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2372 }
2373
2374 xscale->dbr_available--;
2375
2376 return ERROR_OK;
2377 }
2378
2379 int xscale_unset_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2380 {
2381 armv4_5_common_t *armv4_5 = target->arch_info;
2382 xscale_common_t *xscale = armv4_5->arch_info;
2383 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2384 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2385
2386 if (target->state != TARGET_HALTED)
2387 {
2388 LOG_WARNING("target not halted");
2389 return ERROR_TARGET_NOT_HALTED;
2390 }
2391
2392 if (!watchpoint->set)
2393 {
2394 LOG_WARNING("watchpoint not set");
2395 return ERROR_OK;
2396 }
2397
2398 if (watchpoint->set == 1)
2399 {
2400 dbcon_value &= ~0x3;
2401 xscale_set_reg_u32(dbcon, dbcon_value);
2402 xscale->dbr0_used = 0;
2403 }
2404 else if (watchpoint->set == 2)
2405 {
2406 dbcon_value &= ~0xc;
2407 xscale_set_reg_u32(dbcon, dbcon_value);
2408 xscale->dbr1_used = 0;
2409 }
2410 watchpoint->set = 0;
2411
2412 return ERROR_OK;
2413 }
2414
2415 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2416 {
2417 armv4_5_common_t *armv4_5 = target->arch_info;
2418 xscale_common_t *xscale = armv4_5->arch_info;
2419
2420 if (target->state != TARGET_HALTED)
2421 {
2422 LOG_WARNING("target not halted");
2423 return ERROR_TARGET_NOT_HALTED;
2424 }
2425
2426 if (watchpoint->set)
2427 {
2428 xscale_unset_watchpoint(target, watchpoint);
2429 }
2430
2431 xscale->dbr_available++;
2432
2433 return ERROR_OK;
2434 }
2435
2436 void xscale_enable_watchpoints(struct target_s *target)
2437 {
2438 watchpoint_t *watchpoint = target->watchpoints;
2439
2440 while (watchpoint)
2441 {
2442 if (watchpoint->set == 0)
2443 xscale_set_watchpoint(target, watchpoint);
2444 watchpoint = watchpoint->next;
2445 }
2446 }
2447
2448 void xscale_enable_breakpoints(struct target_s *target)
2449 {
2450 breakpoint_t *breakpoint = target->breakpoints;
2451
2452 /* set any pending breakpoints */
2453 while (breakpoint)
2454 {
2455 if (breakpoint->set == 0)
2456 xscale_set_breakpoint(target, breakpoint);
2457 breakpoint = breakpoint->next;
2458 }
2459 }
2460
2461 int xscale_get_reg(reg_t *reg)
2462 {
2463 xscale_reg_t *arch_info = reg->arch_info;
2464 target_t *target = arch_info->target;
2465 armv4_5_common_t *armv4_5 = target->arch_info;
2466 xscale_common_t *xscale = armv4_5->arch_info;
2467
2468 /* DCSR, TX and RX are accessible via JTAG */
2469 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2470 {
2471 return xscale_read_dcsr(arch_info->target);
2472 }
2473 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2474 {
2475 /* 1 = consume register content */
2476 return xscale_read_tx(arch_info->target, 1);
2477 }
2478 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2479 {
2480 /* can't read from RX register (host -> debug handler) */
2481 return ERROR_OK;
2482 }
2483 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2484 {
2485 /* can't (explicitly) read from TXRXCTRL register */
2486 return ERROR_OK;
2487 }
2488 else /* Other DBG registers have to be transferred by the debug handler */
2489 {
2490 /* send CP read request (command 0x40) */
2491 xscale_send_u32(target, 0x40);
2492
2493 /* send CP register number */
2494 xscale_send_u32(target, arch_info->dbg_handler_number);
2495
2496 /* read register value */
2497 xscale_read_tx(target, 1);
2498 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2499
2500 reg->dirty = 0;
2501 reg->valid = 1;
2502 }
2503
2504 return ERROR_OK;
2505 }
2506
2507 int xscale_set_reg(reg_t *reg, u8* buf)
2508 {
2509 xscale_reg_t *arch_info = reg->arch_info;
2510 target_t *target = arch_info->target;
2511 armv4_5_common_t *armv4_5 = target->arch_info;
2512 xscale_common_t *xscale = armv4_5->arch_info;
2513 u32 value = buf_get_u32(buf, 0, 32);
2514
2515 /* DCSR, TX and RX are accessible via JTAG */
2516 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2517 {
2518 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2519 return xscale_write_dcsr(arch_info->target, -1, -1);
2520 }
2521 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2522 {
2523 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2524 return xscale_write_rx(arch_info->target);
2525 }
2526 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2527 {
2528 /* can't write to TX register (debug-handler -> host) */
2529 return ERROR_OK;
2530 }
2531 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2532 {
2533 /* can't (explicitly) write to TXRXCTRL register */
2534 return ERROR_OK;
2535 }
2536 else /* Other DBG registers have to be transferred by the debug handler */
2537 {
2538 /* send CP write request (command 0x41) */
2539 xscale_send_u32(target, 0x41);
2540
2541 /* send CP register number */
2542 xscale_send_u32(target, arch_info->dbg_handler_number);
2543
2544 /* send CP register value */
2545 xscale_send_u32(target, value);
2546 buf_set_u32(reg->value, 0, 32, value);
2547 }
2548
2549 return ERROR_OK;
2550 }
2551
2552 /* convenience wrapper to access XScale specific registers */
2553 int xscale_set_reg_u32(reg_t *reg, u32 value)
2554 {
2555 u8 buf[4];
2556
2557 buf_set_u32(buf, 0, 32, value);
2558
2559 return xscale_set_reg(reg, buf);
2560 }
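/* typical use, as in the breakpoint/watchpoint code above:
 *   xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], breakpoint->address | 1);
 */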
2561
2562 int xscale_write_dcsr_sw(target_t *target, u32 value)
2563 {
2564 /* get pointers to arch-specific information */
2565 armv4_5_common_t *armv4_5 = target->arch_info;
2566 xscale_common_t *xscale = armv4_5->arch_info;
2567 reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2568 xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
2569
2570 /* send CP write request (command 0x41) */
2571 xscale_send_u32(target, 0x41);
2572
2573 /* send CP register number */
2574 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2575
2576 /* send CP register value */
2577 xscale_send_u32(target, value);
2578 buf_set_u32(dcsr->value, 0, 32, value);
2579
2580 return ERROR_OK;
2581 }
2582
2583 int xscale_read_trace(target_t *target)
2584 {
2585 /* get pointers to arch-specific information */
2586 armv4_5_common_t *armv4_5 = target->arch_info;
2587 xscale_common_t *xscale = armv4_5->arch_info;
2588 xscale_trace_data_t **trace_data_p;
2589
2590 /* 258 words from debug handler
2591 * 256 trace buffer entries
2592 * 2 checkpoint addresses
2593 */
2594 u32 trace_buffer[258];
2595 int is_address[256];
2596 int i, j;
2597
2598 if (target->state != TARGET_HALTED)
2599 {
2600 LOG_WARNING("target must be stopped to read trace data");
2601 return ERROR_TARGET_NOT_HALTED;
2602 }
2603
2604 /* send read trace buffer command (command 0x61) */
2605 xscale_send_u32(target, 0x61);
2606
2607 /* receive trace buffer content */
2608 xscale_receive(target, trace_buffer, 258);
2609
2610 /* parse buffer backwards to identify address entries */
2611 for (i = 255; i >= 0; i--)
2612 {
2613 is_address[i] = 0;
2614 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2615 ((trace_buffer[i] & 0xf0) == 0xd0))
2616 {
2617 if (i >= 3)
2618 is_address[--i] = 1;
2619 if (i >= 2)
2620 is_address[--i] = 1;
2621 if (i >= 1)
2622 is_address[--i] = 1;
2623 if (i >= 0)
2624 is_address[--i] = 1;
2625 }
2626 }
2627
2628
2629 /* search first non-zero entry */
2630 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2631 ;
2632
2633 if (j == 256)
2634 {
2635 LOG_DEBUG("no trace data collected");
2636 return ERROR_XSCALE_NO_TRACE_DATA;
2637 }
2638
2639 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2640 ;
2641
2642 *trace_data_p = malloc(sizeof(xscale_trace_data_t));
2643 (*trace_data_p)->next = NULL;
2644 (*trace_data_p)->chkpt0 = trace_buffer[256];
2645 (*trace_data_p)->chkpt1 = trace_buffer[257];
2646 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2647 (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
2648 (*trace_data_p)->depth = 256 - j;
2649
2650 for (i = j; i < 256; i++)
2651 {
2652 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2653 if (is_address[i])
2654 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2655 else
2656 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2657 }
2658
2659 return ERROR_OK;
2660 }
2661
2662 int xscale_read_instruction(target_t *target, arm_instruction_t *instruction)
2663 {
2664 /* get pointers to arch-specific information */
2665 armv4_5_common_t *armv4_5 = target->arch_info;
2666 xscale_common_t *xscale = armv4_5->arch_info;
2667 int i;
2668 int section = -1;
2669 u32 size_read;
2670 u32 opcode;
2671 int retval;
2672
2673 if (!xscale->trace.image)
2674 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2675
2676 /* search for the section the current instruction belongs to */
2677 for (i = 0; i < xscale->trace.image->num_sections; i++)
2678 {
2679 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2680 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2681 {
2682 section = i;
2683 break;
2684 }
2685 }
2686
2687 if (section == -1)
2688 {
2689 /* current instruction couldn't be found in the image */
2690 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2691 }
2692
2693 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2694 {
2695 u8 buf[4];
2696 if ((retval = image_read_section(xscale->trace.image, section,
2697 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2698 4, buf, &size_read)) != ERROR_OK)
2699 {
2700 LOG_ERROR("error while reading instruction: %i", retval);
2701 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2702 }
2703 opcode = target_buffer_get_u32(target, buf);
2704 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2705 }
2706 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2707 {
2708 u8 buf[2];
2709 if ((retval = image_read_section(xscale->trace.image, section,
2710 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2711 2, buf, &size_read)) != ERROR_OK)
2712 {
2713 LOG_ERROR("error while reading instruction: %i", retval);
2714 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2715 }
2716 opcode = target_buffer_get_u16(target, buf);
2717 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2718 }
2719 else
2720 {
2721 LOG_ERROR("BUG: unknown core state encountered");
2722 exit(-1);
2723 }
2724
2725 return ERROR_OK;
2726 }
2727
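/* reconstruct the 32-bit target address of an indirect branch from the four
 * address bytes preceding the branch message in the trace buffer:
 * entries[i-1] holds the least significant byte, entries[i-4] the most significant */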
2728 int xscale_branch_address(xscale_trace_data_t *trace_data, int i, u32 *target)
2729 {
2730 /* if there are less than four entries prior to the indirect branch message
2731 * we can't extract the address */
2732 if (i < 4)
2733 {
2734 return -1;
2735 }
2736
2737 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2738 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2739
2740 return 0;
2741 }
2742
2743 int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
2744 {
2745 /* get pointers to arch-specific information */
2746 armv4_5_common_t *armv4_5 = target->arch_info;
2747 xscale_common_t *xscale = armv4_5->arch_info;
2748 int next_pc_ok = 0;
2749 u32 next_pc = 0x0;
2750 xscale_trace_data_t *trace_data = xscale->trace.data;
2751 int retval;
2752
2753 while (trace_data)
2754 {
2755 int i, chkpt;
2756 int rollover;
2757 int branch;
2758 int exception;
2759 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2760
2761 chkpt = 0;
2762 rollover = 0;
2763
2764 for (i = 0; i < trace_data->depth; i++)
2765 {
2766 next_pc_ok = 0;
2767 branch = 0;
2768 exception = 0;
2769
2770 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2771 continue;
2772
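/* a trace entry encodes the message type in its high nibble (decoded below)
 * and an incremental instruction count in its low nibble */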
2773 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2774 {
2775 case 0: /* Exceptions */
2776 case 1:
2777 case 2:
2778 case 3:
2779 case 4:
2780 case 5:
2781 case 6:
2782 case 7:
2783 exception = (trace_data->entries[i].data & 0x70) >> 4;
2784 next_pc_ok = 1;
2785 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2786 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2787 break;
2788 case 8: /* Direct Branch */
2789 branch = 1;
2790 break;
2791 case 9: /* Indirect Branch */
2792 branch = 1;
2793 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2794 {
2795 next_pc_ok = 1;
2796 }
2797 break;
2798 case 13: /* Checkpointed Indirect Branch */
2799 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2800 {
2801 next_pc_ok = 1;
2802 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2803 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2804 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2805 }
2806 /* explicit fall-through */
2807 case 12: /* Checkpointed Direct Branch */
2808 branch = 1;
2809 if (chkpt == 0)
2810 {
2811 next_pc_ok = 1;
2812 next_pc = trace_data->chkpt0;
2813 chkpt++;
2814 }
2815 else if (chkpt == 1)
2816 {
2817 next_pc_ok = 1;
2818 next_pc = trace_data->chkpt1;
2819 chkpt++;
2820 }
2821 else
2822 {
2823 LOG_WARNING("more than two checkpointed branches encountered");
2824 }
2825 break;
2826 case 15: /* Roll-over */
2827 rollover++;
2828 continue;
2829 default: /* Reserved */
2830 command_print(cmd_ctx, "--- reserved trace message ---");
2831 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2832 return ERROR_OK;
2833 }
2834
2835 if (xscale->trace.pc_ok)
2836 {
2837 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
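/* every preceding roll-over message (type 15) means the 4-bit count wrapped,
 * contributing another 16 executed instructions */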
2838 arm_instruction_t instruction;
2839
2840 if ((exception == 6) || (exception == 7))
2841 {
2842 /* IRQ or FIQ exception, no instruction executed */
2843 executed -= 1;
2844 }
2845
2846 while (executed-- >= 0)
2847 {
2848 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2849 {
2850 /* can't continue tracing with no image available */
2851 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2852 {
2853 return retval;
2854 }
2855 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2856 {
2857 /* TODO: handle incomplete images */
2858 }
2859 }
2860
2861 /* a precise abort on a load to the PC is included in the incremental
2862 * word count, other instructions causing data aborts are not included
2863 */
2864 if ((executed == 0) && (exception == 4)
2865 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2866 {
2867 if ((instruction.type == ARM_LDM)
2868 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2869 {
2870 executed--;
2871 }
2872 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2873 && (instruction.info.load_store.Rd != 15))
2874 {
2875 executed--;
2876 }
2877 }
2878
2879 /* only the last instruction executed
2880 * (the one that caused the control flow change)
2881 * could be a taken branch
2882 */
2883 if (((executed == -1) && (branch == 1)) &&
2884 (((instruction.type == ARM_B) ||
2885 (instruction.type == ARM_BL) ||
2886 (instruction.type == ARM_BLX)) &&
2887 (instruction.info.b_bl_bx_blx.target_address != -1)))
2888 {
2889 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2890 }
2891 else
2892 {
2893 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2894 }
2895 command_print(cmd_ctx, "%s", instruction.text);
2896 }
2897
2898 rollover = 0;
2899 }
2900
2901 if (next_pc_ok)
2902 {
2903 xscale->trace.current_pc = next_pc;
2904 xscale->trace.pc_ok = 1;
2905 }
2906 }
2907
2908 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2909 {
2910 arm_instruction_t instruction;
2911 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2912 {
2913 /* can't continue tracing with no image available */
2914 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2915 {
2916 return retval;
2917 }
2918 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2919 {
2920 /* TODO: handle incomplete images */
2921 }
2922 }
2923 command_print(cmd_ctx, "%s", instruction.text);
2924 }
2925
2926 trace_data = trace_data->next;
2927 }
2928
2929 return ERROR_OK;
2930 }
2931
2932 void xscale_build_reg_cache(target_t *target)
2933 {
2934 /* get pointers to arch-specific information */
2935 armv4_5_common_t *armv4_5 = target->arch_info;
2936 xscale_common_t *xscale = armv4_5->arch_info;
2937
2938 reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
2939 xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
2940 int i;
2941 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);
2942
2943 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2944 armv4_5->core_cache = (*cache_p);
2945
2946 /* register a register arch-type for XScale dbg registers only once */
2947 if (xscale_reg_arch_type == -1)
2948 xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
2949
2950 (*cache_p)->next = malloc(sizeof(reg_cache_t));
2951 cache_p = &(*cache_p)->next;
2952
2953 /* fill in values for the xscale reg cache */
2954 (*cache_p)->name = "XScale registers";
2955 (*cache_p)->next = NULL;
2956 (*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
2957 (*cache_p)->num_regs = num_regs;
2958
2959 for (i = 0; i < num_regs; i++)
2960 {
2961 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2962 (*cache_p)->reg_list[i].value = calloc(4, 1);
2963 (*cache_p)->reg_list[i].dirty = 0;
2964 (*cache_p)->reg_list[i].valid = 0;
2965 (*cache_p)->reg_list[i].size = 32;
2966 (*cache_p)->reg_list[i].bitfield_desc = NULL;
2967 (*cache_p)->reg_list[i].num_bitfields = 0;
2968 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2969 (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
2970 arch_info[i] = xscale_reg_arch_info[i];
2971 arch_info[i].target = target;
2972 }
2973
2974 xscale->reg_cache = (*cache_p);
2975 }
2976
2977 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target)
2978 {
2979 return ERROR_OK;
2980 }
2981
2982 int xscale_quit()
2983 {
2984
2985 return ERROR_OK;
2986 }
2987
2988 int xscale_init_arch_info(target_t *target, xscale_common_t *xscale, int chain_pos, char *variant)
2989 {
2990 armv4_5_common_t *armv4_5;
2991 u32 high_reset_branch, low_reset_branch;
2992 int i;
2993
2994 armv4_5 = &xscale->armv4_5_common;
2995
2996 /* store architecture specific data (none so far) */
2997 xscale->arch_info = NULL;
2998 xscale->common_magic = XSCALE_COMMON_MAGIC;
2999
3000 /* remember the variant (PXA25x, PXA27x, IXP42x, ...) */
3001 xscale->variant = strdup(variant);
3002
3003 /* prepare JTAG information for the new target */
3004 xscale->jtag_info.chain_pos = chain_pos;
3005
3006 xscale->jtag_info.dbgrx = 0x02;
3007 xscale->jtag_info.dbgtx = 0x10;
3008 xscale->jtag_info.dcsr = 0x09;
3009 xscale->jtag_info.ldic = 0x07;
3010
3011 if ((strcmp(xscale->variant, "pxa250") == 0) ||
3012 (strcmp(xscale->variant, "pxa255") == 0) ||
3013 (strcmp(xscale->variant, "pxa26x") == 0))
3014 {
3015 xscale->jtag_info.ir_length = 5;
3016 }
3017 else if ((strcmp(xscale->variant, "pxa27x") == 0) ||
3018 (strcmp(xscale->variant, "ixp42x") == 0) ||
3019 (strcmp(xscale->variant, "ixp45x") == 0) ||
3020 (strcmp(xscale->variant, "ixp46x") == 0))
3021 {
3022 xscale->jtag_info.ir_length = 7;
3023 }
3024
3025 /* the debug handler isn't installed (and thus not running) at this time */
3026 xscale->handler_installed = 0;
3027 xscale->handler_running = 0;
3028 xscale->handler_address = 0xfe000800;
3029
3030 /* clear the vectors we keep locally for reference */
3031 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3032 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3033
3034 /* no user-specified vectors have been configured yet */
3035 xscale->static_low_vectors_set = 0x0;
3036 xscale->static_high_vectors_set = 0x0;
3037
3038 /* calculate branches to debug handler */
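/* an ARM B instruction encodes a word offset relative to PC + 8, so the
 * offset is (destination - vector_address - 8) >> 2, with the destination
 * here being handler_address + 0x20 */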
3039 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3040 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
3041
3042 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3043 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3044
3045 for (i = 1; i <= 7; i++)
3046 {
3047 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3048 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3049 }
3050
3051 /* 64kB aligned region used for DCache cleaning */
3052 xscale->cache_clean_address = 0xfffe0000;
3053
3054 xscale->hold_rst = 0;
3055 xscale->external_debug_break = 0;
3056
3057 xscale->force_hw_bkpts = 1;
3058
3059 xscale->ibcr_available = 2;
3060 xscale->ibcr0_used = 0;
3061 xscale->ibcr1_used = 0;
3062
3063 xscale->dbr_available = 2;
3064 xscale->dbr0_used = 0;
3065 xscale->dbr1_used = 0;
3066
3067 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3068 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3069
3070 xscale->vector_catch = 0x1;
3071
3072 xscale->trace.capture_status = TRACE_IDLE;
3073 xscale->trace.data = NULL;
3074 xscale->trace.image = NULL;
3075 xscale->trace.buffer_enabled = 0;
3076 xscale->trace.buffer_fill = 0;
3077
3078 /* prepare ARMv4/5 specific information */
3079 armv4_5->arch_info = xscale;
3080 armv4_5->read_core_reg = xscale_read_core_reg;
3081 armv4_5->write_core_reg = xscale_write_core_reg;
3082 armv4_5->full_context = xscale_full_context;
3083
3084 armv4_5_init_arch_info(target, armv4_5);
3085
3086 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3087 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3088 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3089 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3090 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3091 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3092 xscale->armv4_5_mmu.has_tiny_pages = 1;
3093 xscale->armv4_5_mmu.mmu_enabled = 0;
3094
3095 return ERROR_OK;
3096 }
3097
3098 /* target xscale <endianness> <startup_mode> <chain_pos> <variant> */
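/* illustrative configuration-file line (argument values are examples only,
 * not a recommendation for any particular board):
 *   target xscale little reset_halt 0 pxa255
 */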
3099 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target)
3100 {
3101 int chain_pos;
3102 char *variant = NULL;
3103 xscale_common_t *xscale = malloc(sizeof(xscale_common_t));
3104 memset(xscale, 0, sizeof(*xscale));
3105
3106 if (argc < 5)
3107 {
3108 LOG_ERROR("'target xscale' requires four arguments: <endianness> <startup_mode> <chain_pos> <variant>");
3109 return ERROR_OK;
3110 }
3111
3112 chain_pos = strtoul(args[3], NULL, 0);
3113
3114 variant = args[4];
3115
3116 xscale_init_arch_info(target, xscale, chain_pos, variant);
3117 xscale_build_reg_cache(target);
3118
3119 return ERROR_OK;
3120 }
3121
3122 int xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3123 {
3124 target_t *target = NULL;
3125 armv4_5_common_t *armv4_5;
3126 xscale_common_t *xscale;
3127
3128 u32 handler_address;
3129
3130 if (argc < 2)
3131 {
3132 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3133 return ERROR_OK;
3134 }
3135
3136 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3137 {
3138 LOG_ERROR("no target '%s' configured", args[0]);
3139 return ERROR_OK;
3140 }
3141
3142 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3143 {
3144 return ERROR_OK;
3145 }
3146
3147 handler_address = strtoul(args[1], NULL, 0);
3148
3149 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3150 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3151 {
3152 xscale->handler_address = handler_address;
3153 }
3154 else
3155 {
3156 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3157 }
3158
3159 return ERROR_OK;
3160 }
3161
3162 int xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3163 {
3164 target_t *target = NULL;
3165 armv4_5_common_t *armv4_5;
3166 xscale_common_t *xscale;
3167
3168 u32 cache_clean_address;
3169
3170 if (argc < 2)
3171 {
3172 LOG_ERROR("'xscale cache_clean_address <target#> <address>' command takes two required operands");
3173 return ERROR_OK;
3174 }
3175
3176 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3177 {
3178 LOG_ERROR("no target '%s' configured", args[0]);
3179 return ERROR_OK;
3180 }
3181
3182 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3183 {
3184 return ERROR_OK;
3185 }
3186
3187 cache_clean_address = strtoul(args[1], NULL, 0);
3188
3189 if (cache_clean_address & 0xffff)
3190 {
3191 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3192 }
3193 else
3194 {
3195 xscale->cache_clean_address = cache_clean_address;
3196 }
3197
3198 return ERROR_OK;
3199 }
3200
3201 int xscale_handle_cache_info_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3202 {
3203 target_t *target = get_current_target(cmd_ctx);
3204 armv4_5_common_t *armv4_5;
3205 xscale_common_t *xscale;
3206
3207 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3208 {
3209 return ERROR_OK;
3210 }
3211
3212 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
3213 }
3214
3215 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical)
3216 {
3217 armv4_5_common_t *armv4_5;
3218 xscale_common_t *xscale;
3219 int retval;
3220 int type;
3221 u32 cb;
3222 int domain;
3223 u32 ap;
3224
3225
3226 if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)
3227 {
3228 return retval;
3229 }
3230 u32 ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3231 if (type == -1)
3232 {
3233 return ret;
3234 }
3235 *physical = ret;
3236 return ERROR_OK;
3237 }
3238
3239 static int xscale_mmu(struct target_s *target, int *enabled)
3240 {
3241 armv4_5_common_t *armv4_5 = target->arch_info;
3242 xscale_common_t *xscale = armv4_5->arch_info;
3243
3244 if (target->state != TARGET_HALTED)
3245 {
3246 LOG_ERROR("Target not halted");
3247 return ERROR_TARGET_INVALID;
3248 }
3249 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3250 return ERROR_OK;
3251 }
3252
3253
3254 int xscale_handle_mmu_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3255 {
3256 target_t *target = get_current_target(cmd_ctx);
3257 armv4_5_common_t *armv4_5;
3258 xscale_common_t *xscale;
3259
3260 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3261 {
3262 return ERROR_OK;
3263 }
3264
3265 if (target->state != TARGET_HALTED)
3266 {
3267 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3268 return ERROR_OK;
3269 }
3270
3271 if (argc >= 1)
3272 {
3273 if (strcmp("enable", args[0]) == 0)
3274 {
3275 xscale_enable_mmu_caches(target, 1, 0, 0);
3276 xscale->armv4_5_mmu.mmu_enabled = 1;
3277 }
3278 else if (strcmp("disable", args[0]) == 0)
3279 {
3280 xscale_disable_mmu_caches(target, 1, 0, 0);
3281 xscale->armv4_5_mmu.mmu_enabled = 0;
3282 }
3283 }
3284
3285 command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3286
3287 return ERROR_OK;
3288 }
3289
3290 int xscale_handle_idcache_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3291 {
3292 target_t *target = get_current_target(cmd_ctx);
3293 armv4_5_common_t *armv4_5;
3294 xscale_common_t *xscale;
3295 int icache = 0, dcache = 0;
3296
3297 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3298 {
3299 return ERROR_OK;
3300 }
3301
3302 if (target->state != TARGET_HALTED)
3303 {
3304 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3305 return ERROR_OK;
3306 }
3307
3308 if (strcmp(cmd, "icache") == 0)
3309 icache = 1;
3310 else if (strcmp(cmd, "dcache") == 0)
3311 dcache = 1;
3312
3313 if (argc >= 1)
3314 {
3315 if (strcmp("enable", args[0]) == 0)
3316 {
3317 xscale_enable_mmu_caches(target, 0, dcache, icache);
3318
3319 if (icache)
3320 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
3321 else if (dcache)
3322 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
3323 }
3324 else if (strcmp("disable", args[0]) == 0)
3325 {
3326 xscale_disable_mmu_caches(target, 0, dcache, icache);
3327
3328 if (icache)
3329 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
3330 else if (dcache)
3331 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
3332 }
3333 }
3334
3335 if (icache)
3336 command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
3337
3338 if (dcache)
3339 command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
3340
3341 return ERROR_OK;
3342 }
3343
3344 int xscale_handle_vector_catch_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3345 {
3346 target_t *target = get_current_target(cmd_ctx);
3347 armv4_5_common_t *armv4_5;
3348 xscale_common_t *xscale;
3349
3350 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3351 {
3352 return ERROR_OK;
3353 }
3354
3355 if (argc < 1)
3356 {
3357 command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
3358 }
3359 else
3360 {
3361 xscale->vector_catch = strtoul(args[0], NULL, 0);
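/* the mask is mirrored into DCSR bits [23:16]; each bit enables trapping of
 * one of the eight exception vectors */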
3362 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3363 xscale_write_dcsr(target, -1, -1);
3364 }
3365
3366 command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3367
3368 return ERROR_OK;
3369 }
3370
3371 int xscale_handle_force_hw_bkpts_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3372 {
3373 target_t *target = get_current_target(cmd_ctx);
3374 armv4_5_common_t *armv4_5;
3375 xscale_common_t *xscale;
3376
3377 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3378 {
3379 return ERROR_OK;
3380 }
3381
3382 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3383 {
3384 xscale->force_hw_bkpts = 1;
3385 }
3386 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3387 {
3388 xscale->force_hw_bkpts = 0;
3389 }
3390 else
3391 {
3392 command_print(cmd_ctx, "usage: xscale force_hw_bkpts <enable|disable>");
3393 }
3394
3395 command_print(cmd_ctx, "force hardware breakpoints %s", (xscale->force_hw_bkpts) ? "enabled" : "disabled");
3396
3397 return ERROR_OK;
3398 }
3399
3400 int xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3401 {
3402 target_t *target = get_current_target(cmd_ctx);
3403 armv4_5_common_t *armv4_5;
3404 xscale_common_t *xscale;
3405 u32 dcsr_value;
3406
3407 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3408 {
3409 return ERROR_OK;
3410 }
3411
3412 if (target->state != TARGET_HALTED)
3413 {
3414 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3415 return ERROR_OK;
3416 }
3417
3418 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3419 {
3420 xscale_trace_data_t *td, *next_td;
3421 xscale->trace.buffer_enabled = 1;
3422
3423 /* free old trace data */
3424 td = xscale->trace.data;
3425 while (td)
3426 {
3427 next_td = td->next;
3428
3429 if (td->entries)
3430 free(td->entries);
3431 free(td);
3432 td = next_td;
3433 }
3434 xscale->trace.data = NULL;
3435 }
3436 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3437 {
3438 xscale->trace.buffer_enabled = 0;
3439 }
3440
3441 if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
3442 {
3443 if (argc >= 3)
3444 xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);
3445 else
3446 xscale->trace.buffer_fill = 1;
3447 }
3448 else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
3449 {
3450 xscale->trace.buffer_fill = -1;
3451 }
3452
3453 if (xscale->trace.buffer_enabled)
3454 {
3455 /* if we enable the trace buffer in fill-once
3456 * mode we know the address of the first instruction */
3457 xscale->trace.pc_ok = 1;
3458 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3459 }
3460 else
3461 {
3462 /* otherwise the address is unknown, and we have no known good PC */
3463 xscale->trace.pc_ok = 0;
3464 }
3465
3466 command_print(cmd_ctx, "trace buffer %s (%s)",
3467 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3468 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3469
3470 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3471 if (xscale->trace.buffer_fill >= 0)
3472 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3473 else
3474 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3475
3476 return ERROR_OK;
3477 }
3478
3479 int xscale_handle_trace_image_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3480 {
3481 target_t *target;
3482 armv4_5_common_t *armv4_5;
3483 xscale_common_t *xscale;
3484
3485 if (argc < 1)
3486 {
3487 command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
3488 return ERROR_OK;
3489 }
3490
3491 target = get_current_target(cmd_ctx);
3492
3493 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3494 {
3495 return ERROR_OK;
3496 }
3497
3498 if (xscale->trace.image)
3499 {
3500 image_close(xscale->trace.image);
3501 free(xscale->trace.image);
3502 command_print(cmd_ctx, "previously loaded image found and closed");
3503 }
3504
3505 xscale->trace.image = malloc(sizeof(image_t));
3506 xscale->trace.image->base_address_set = 0;
3507 xscale->trace.image->start_address_set = 0;
3508
3509 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3510 if (argc >= 2)
3511 {
3512 xscale->trace.image->base_address_set = 1;
3513 xscale->trace.image->base_address = strtoul(args[1], NULL, 0);
3514 }
3515 else
3516 {
3517 xscale->trace.image->base_address_set = 0;
3518 }
3519
3520 if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
3521 {
3522 free(xscale->trace.image);
3523 xscale->trace.image = NULL;
3524 return ERROR_OK;
3525 }
3526
3527 return ERROR_OK;
3528 }
3529
3530 int xscale_handle_dump_trace_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3531 {
3532 target_t *target = get_current_target(cmd_ctx);
3533 armv4_5_common_t *armv4_5;
3534 xscale_common_t *xscale;
3535 xscale_trace_data_t *trace_data;
3536 fileio_t file;
3537
3538 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3539 {
3540 return ERROR_OK;
3541 }
3542
3543 if (target->state != TARGET_HALTED)
3544 {
3545 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3546 return ERROR_OK;
3547 }
3548
3549 if (argc < 1)
3550 {
3551 command_print(cmd_ctx, "usage: xscale dump_trace <file>");
3552 return ERROR_OK;
3553 }
3554
3555 trace_data = xscale->trace.data;
3556
3557 if (!trace_data)
3558 {
3559 command_print(cmd_ctx, "no trace data collected");
3560 return ERROR_OK;
3561 }
3562
3563 if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3564 {
3565 return ERROR_OK;
3566 }
3567
3568 while (trace_data)
3569 {
3570 int i;
3571
3572 fileio_write_u32(&file, trace_data->chkpt0);
3573 fileio_write_u32(&file, trace_data->chkpt1);
3574 fileio_write_u32(&file, trace_data->last_instruction);
3575 fileio_write_u32(&file, trace_data->depth);
3576
3577 for (i = 0; i < trace_data->depth; i++)
3578 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3579
3580 trace_data = trace_data->next;
3581 }
3582
3583 fileio_close(&file);
3584
3585 return ERROR_OK;
3586 }
3587
3588 int xscale_handle_analyze_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3589 {
3590 target_t *target = get_current_target(cmd_ctx);
3591 armv4_5_common_t *armv4_5;
3592 xscale_common_t *xscale;
3593
3594 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3595 {
3596 return ERROR_OK;
3597 }
3598
3599 xscale_analyze_trace(target, cmd_ctx);
3600
3601 return ERROR_OK;
3602 }
3603
3604 int xscale_handle_cp15(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3605 {
3606 target_t *target = get_current_target(cmd_ctx);
3607 armv4_5_common_t *armv4_5;
3608 xscale_common_t *xscale;
3609
3610 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3611 {
3612 return ERROR_OK;
3613 }
3614
3615 if (target->state != TARGET_HALTED)
3616 {
3617 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3618 return ERROR_OK;
3619 }
3620 u32 reg_no = 0;
3621 reg_t *reg = NULL;
3622 if(argc > 0)
3623 {
3624 reg_no = strtoul(args[0], NULL, 0);
3625 /*translate from xscale cp15 register no to openocd register*/
3626 switch(reg_no)
3627 {
3628 case 0:
3629 reg_no = XSCALE_MAINID;
3630 break;
3631 case 1:
3632 reg_no = XSCALE_CTRL;
3633 break;
3634 case 2:
3635 reg_no = XSCALE_TTB;
3636 break;
3637 case 3:
3638 reg_no = XSCALE_DAC;
3639 break;
3640 case 5:
3641 reg_no = XSCALE_FSR;
3642 break;
3643 case 6:
3644 reg_no = XSCALE_FAR;
3645 break;
3646 case 13:
3647 reg_no = XSCALE_PID;
3648 break;
3649 case 15:
3650 reg_no = XSCALE_CPACCESS;
3651 break;
3652 default:
3653 command_print(cmd_ctx, "invalid register number");
3654 return ERROR_INVALID_ARGUMENTS;
3655 }
3656 reg = &xscale->reg_cache->reg_list[reg_no];
3657
3658 }
3659 if(argc == 1)
3660 {
3661 u32 value;
3662
3663 /* read cp15 control register */
3664 xscale_get_reg(reg);
3665 value = buf_get_u32(reg->value, 0, 32);
3666 command_print(cmd_ctx, "%s (/%i): 0x%x", reg->name, reg->size, value);
3667 }
3668 else if(argc == 2)
3669 {
3670
3671 u32 value = strtoul(args[1], NULL, 0);
3672
3673 /* send CP write request (command 0x41) */
3674 xscale_send_u32(target, 0x41);
3675
3676 /* send CP register number */
3677 xscale_send_u32(target, reg_no);
3678
3679 /* send CP register value */
3680 xscale_send_u32(target, value);
3681
3682 /* execute cpwait to ensure outstanding operations complete */
3683 xscale_send_u32(target, 0x53);
3684 }
3685 else
3686 {
3687 command_print(cmd_ctx, "usage: cp15 <register> [value]");
3688 }
3689
3690 return ERROR_OK;
3691 }
3692
3693 int xscale_register_commands(struct command_context_s *cmd_ctx)
3694 {
3695 command_t *xscale_cmd;
3696
3697 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
3698
3699 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
3700 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
3701
3702 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
3703 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
3704 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
3705 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
3706
3707 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be caught");
3708
3709 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable|disable> ['fill' [n]|'wrap']");
3710
3711 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
3712 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
3713 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
3714 COMMAND_EXEC, "load image from <file> [base address]");
3715
3716 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
3717
3718 armv4_5_register_commands(cmd_ctx);
3719
3720 return ERROR_OK;
3721 }
