[openocd.git] / src / target / xscale.c
1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
19 ***************************************************************************/
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "replacements.h"
25
26 #include "xscale.h"
27
28 #include "register.h"
29 #include "target.h"
30 #include "armv4_5.h"
31 #include "arm_simulator.h"
32 #include "arm_disassembler.h"
33 #include "log.h"
34 #include "jtag.h"
35 #include "binarybuffer.h"
36 #include "time_support.h"
37 #include "breakpoints.h"
38 #include "fileio.h"
39
40 #include <stdlib.h>
41 #include <string.h>
42
43 #include <sys/types.h>
44 #include <unistd.h>
45 #include <errno.h>
46
47
48 /* cli handling */
49 int xscale_register_commands(struct command_context_s *cmd_ctx);
50
51 /* forward declarations */
52 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target);
53 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target);
54 int xscale_quit();
55
56 int xscale_arch_state(struct target_s *target);
57 int xscale_poll(target_t *target);
58 int xscale_halt(target_t *target);
59 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution);
60 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints);
61 int xscale_debug_entry(target_t *target);
62 int xscale_restore_context(target_t *target);
63
64 int xscale_assert_reset(target_t *target);
65 int xscale_deassert_reset(target_t *target);
66 int xscale_soft_reset_halt(struct target_s *target);
67 int xscale_prepare_reset_halt(struct target_s *target);
68
69 int xscale_set_reg_u32(reg_t *reg, u32 value);
70
71 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode);
72 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value);
73
74 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
75 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
76 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer);
77 int xscale_checksum_memory(struct target_s *target, u32 address, u32 count, u32* checksum);
78
79 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
80 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
81 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
82 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
83 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
84 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
85 void xscale_enable_watchpoints(struct target_s *target);
86 void xscale_enable_breakpoints(struct target_s *target);
87 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical);
88 static int xscale_mmu(struct target_s *target, int *enabled);
89
90 int xscale_read_trace(target_t *target);
91
92 target_type_t xscale_target =
93 {
94 .name = "xscale",
95
96 .poll = xscale_poll,
97 .arch_state = xscale_arch_state,
98
99 .target_request_data = NULL,
100
101 .halt = xscale_halt,
102 .resume = xscale_resume,
103 .step = xscale_step,
104
105 .assert_reset = xscale_assert_reset,
106 .deassert_reset = xscale_deassert_reset,
107 .soft_reset_halt = xscale_soft_reset_halt,
108 .prepare_reset_halt = xscale_prepare_reset_halt,
109
110 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
111
112 .read_memory = xscale_read_memory,
113 .write_memory = xscale_write_memory,
114 .bulk_write_memory = xscale_bulk_write_memory,
115 .checksum_memory = xscale_checksum_memory,
116
117 .run_algorithm = armv4_5_run_algorithm,
118
119 .add_breakpoint = xscale_add_breakpoint,
120 .remove_breakpoint = xscale_remove_breakpoint,
121 .add_watchpoint = xscale_add_watchpoint,
122 .remove_watchpoint = xscale_remove_watchpoint,
123
124 .register_commands = xscale_register_commands,
125 .target_command = xscale_target_command,
126 .init_target = xscale_init_target,
127 .quit = xscale_quit,
128
129 .virt2phys = xscale_virt2phys,
130 .mmu = xscale_mmu
131 };
132
133 char* xscale_reg_list[] =
134 {
135 "XSCALE_MAINID", /* 0 */
136 "XSCALE_CACHETYPE",
137 "XSCALE_CTRL",
138 "XSCALE_AUXCTRL",
139 "XSCALE_TTB",
140 "XSCALE_DAC",
141 "XSCALE_FSR",
142 "XSCALE_FAR",
143 "XSCALE_PID",
144 "XSCALE_CPACCESS",
145 "XSCALE_IBCR0", /* 10 */
146 "XSCALE_IBCR1",
147 "XSCALE_DBR0",
148 "XSCALE_DBR1",
149 "XSCALE_DBCON",
150 "XSCALE_TBREG",
151 "XSCALE_CHKPT0",
152 "XSCALE_CHKPT1",
153 "XSCALE_DCSR",
154 "XSCALE_TX",
155 "XSCALE_RX", /* 20 */
156 "XSCALE_TXRXCTRL",
157 };
158
159 xscale_reg_t xscale_reg_arch_info[] =
160 {
161 {XSCALE_MAINID, NULL},
162 {XSCALE_CACHETYPE, NULL},
163 {XSCALE_CTRL, NULL},
164 {XSCALE_AUXCTRL, NULL},
165 {XSCALE_TTB, NULL},
166 {XSCALE_DAC, NULL},
167 {XSCALE_FSR, NULL},
168 {XSCALE_FAR, NULL},
169 {XSCALE_PID, NULL},
170 {XSCALE_CPACCESS, NULL},
171 {XSCALE_IBCR0, NULL},
172 {XSCALE_IBCR1, NULL},
173 {XSCALE_DBR0, NULL},
174 {XSCALE_DBR1, NULL},
175 {XSCALE_DBCON, NULL},
176 {XSCALE_TBREG, NULL},
177 {XSCALE_CHKPT0, NULL},
178 {XSCALE_CHKPT1, NULL},
179 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
180 {-1, NULL}, /* TX accessed via JTAG */
181 {-1, NULL}, /* RX accessed via JTAG */
182 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
183 };
184
185 int xscale_reg_arch_type = -1;
186
187 int xscale_get_reg(reg_t *reg);
188 int xscale_set_reg(reg_t *reg, u8 *buf);
189
190 int xscale_get_arch_pointers(target_t *target, armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
191 {
192 armv4_5_common_t *armv4_5 = target->arch_info;
193 xscale_common_t *xscale = armv4_5->arch_info;
194
195 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
196 {
 197 		ERROR("target isn't an ARMv4/5 target");
198 return -1;
199 }
200
201 if (xscale->common_magic != XSCALE_COMMON_MAGIC)
202 {
203 ERROR("target isn't an XScale target");
204 return -1;
205 }
206
207 *armv4_5_p = armv4_5;
208 *xscale_p = xscale;
209
210 return ERROR_OK;
211 }
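/* Illustrative usage sketch (the actual call sites are the command handlers
 * further down in this file): callers recover the architecture-specific state
 * before touching any XScale register, e.g.
 *
 *	armv4_5_common_t *armv4_5;
 *	xscale_common_t *xscale;
 *
 *	if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
 *		return ERROR_OK;
 */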
212
213 int xscale_jtag_set_instr(int chain_pos, u32 new_instr)
214 {
215 jtag_device_t *device = jtag_get_device(chain_pos);
216
217 if (buf_get_u32(device->cur_instr, 0, device->ir_length) != new_instr)
218 {
219 scan_field_t field;
220
221 field.device = chain_pos;
222 field.num_bits = device->ir_length;
223 field.out_value = calloc(CEIL(field.num_bits, 8), 1);
224 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
225 field.out_mask = NULL;
226 field.in_value = NULL;
227 jtag_set_check_value(&field, device->expected, device->expected_mask, NULL);
228
229 jtag_add_ir_scan(1, &field, -1);
230
231 free(field.out_value);
232 }
233
234 return ERROR_OK;
235 }
236
237 int xscale_jtag_callback(enum jtag_event event, void *priv)
238 {
239 switch (event)
240 {
241 case JTAG_TRST_ASSERTED:
242 break;
243 case JTAG_TRST_RELEASED:
244 break;
245 case JTAG_SRST_ASSERTED:
246 break;
247 case JTAG_SRST_RELEASED:
248 break;
249 default:
250 WARNING("unhandled JTAG event");
251 }
252
253 return ERROR_OK;
254 }
255
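/* Note on the scan layout used by most JTAG helpers below: the XScale debug
 * registers (DCSR, TX, RX) are scanned as three fields, a 3-bit
 * status/handshake field, the 32-bit payload, and a trailing 1-bit flag.
 * The check value/mask pairs handed to jtag_set_check_value() verify the
 * handshake bits captured on TDO.
 */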
256 int xscale_read_dcsr(target_t *target)
257 {
258 armv4_5_common_t *armv4_5 = target->arch_info;
259 xscale_common_t *xscale = armv4_5->arch_info;
260
261 int retval;
262
263 scan_field_t fields[3];
264 u8 field0 = 0x0;
265 u8 field0_check_value = 0x2;
266 u8 field0_check_mask = 0x7;
267 u8 field2 = 0x0;
268 u8 field2_check_value = 0x0;
269 u8 field2_check_mask = 0x1;
270
271 jtag_add_end_state(TAP_PD);
272 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
273
274 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
275 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
276
277 fields[0].device = xscale->jtag_info.chain_pos;
278 fields[0].num_bits = 3;
279 fields[0].out_value = &field0;
280 fields[0].out_mask = NULL;
281 fields[0].in_value = NULL;
282 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
283
284 fields[1].device = xscale->jtag_info.chain_pos;
285 fields[1].num_bits = 32;
286 fields[1].out_value = NULL;
287 fields[1].out_mask = NULL;
288 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
289 fields[1].in_handler = NULL;
290 fields[1].in_handler_priv = NULL;
291 fields[1].in_check_value = NULL;
292 fields[1].in_check_mask = NULL;
293
294
295
296 fields[2].device = xscale->jtag_info.chain_pos;
297 fields[2].num_bits = 1;
298 fields[2].out_value = &field2;
299 fields[2].out_mask = NULL;
300 fields[2].in_value = NULL;
301 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
302
303 jtag_add_dr_scan(3, fields, -1);
304
305 if ((retval = jtag_execute_queue()) != ERROR_OK)
306 {
307 ERROR("JTAG error while reading DCSR");
308 return retval;
309 }
310
311 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
312 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
313
314 /* write the register with the value we just read
 315 	 * on this second pass, only the first bit of field0 is guaranteed to be 0
316 */
317 field0_check_mask = 0x1;
318 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
319 fields[1].in_value = NULL;
320
321 jtag_add_end_state(TAP_RTI);
322
323 jtag_add_dr_scan(3, fields, -1);
324
325 return ERROR_OK;
326 }
327
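/* Bulk read from the debug handler. Each scheduled DR scan returns one 32-bit
 * word plus a handshake bit in field0; words whose handshake bit was not set
 * are dropped and the remaining entries compacted, and the outer loop retries
 * until all requested words have arrived or 1000 empty rounds have passed.
 */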
328 int xscale_receive(target_t *target, u32 *buffer, int num_words)
329 {
330 int retval=ERROR_OK;
331 armv4_5_common_t *armv4_5 = target->arch_info;
332 xscale_common_t *xscale = armv4_5->arch_info;
333
334 enum tap_state path[3];
335 scan_field_t fields[3];
336
337 u8 *field0 = malloc(num_words * 1);
338 u8 field0_check_value = 0x2;
339 u8 field0_check_mask = 0x6;
340 u32 *field1 = malloc(num_words * 4);
341 u8 field2_check_value = 0x0;
342 u8 field2_check_mask = 0x1;
343 int words_done = 0;
344 int words_scheduled = 0;
345
346 int i;
347
348 path[0] = TAP_SDS;
349 path[1] = TAP_CD;
350 path[2] = TAP_SD;
351
352 fields[0].device = xscale->jtag_info.chain_pos;
353 fields[0].num_bits = 3;
354 fields[0].out_value = NULL;
355 fields[0].out_mask = NULL;
356 /* fields[0].in_value = field0; */
357 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
358
359 fields[1].device = xscale->jtag_info.chain_pos;
360 fields[1].num_bits = 32;
361 fields[1].out_value = NULL;
362 fields[1].out_mask = NULL;
363 fields[1].in_value = NULL;
364 fields[1].in_handler = NULL;
365 fields[1].in_handler_priv = NULL;
366 fields[1].in_check_value = NULL;
367 fields[1].in_check_mask = NULL;
368
369
370
371 fields[2].device = xscale->jtag_info.chain_pos;
372 fields[2].num_bits = 1;
373 fields[2].out_value = NULL;
374 fields[2].out_mask = NULL;
375 fields[2].in_value = NULL;
376 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
377
378 jtag_add_end_state(TAP_RTI);
379 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
380 jtag_add_runtest(1, -1);
381
382 /* repeat until all words have been collected */
383 int attempts=0;
384 while (words_done < num_words)
385 {
386 /* schedule reads */
387 words_scheduled = 0;
388 for (i = words_done; i < num_words; i++)
389 {
390 fields[0].in_value = &field0[i];
391 fields[1].in_handler = buf_to_u32_handler;
392 fields[1].in_handler_priv = (u8*)&field1[i];
393
394 jtag_add_pathmove(3, path);
395 jtag_add_dr_scan(3, fields, TAP_RTI);
396 words_scheduled++;
397 }
398
399 if ((retval = jtag_execute_queue()) != ERROR_OK)
400 {
401 ERROR("JTAG error while receiving data from debug handler");
402 break;
403 }
404
405 /* examine results */
406 for (i = words_done; i < num_words; i++)
407 {
 408 			if (!(field0[i] & 1))
409 {
410 /* move backwards if necessary */
411 int j;
412 for (j = i; j < num_words - 1; j++)
413 {
414 field0[j] = field0[j+1];
415 field1[j] = field1[j+1];
416 }
417 words_scheduled--;
418 }
419 }
420 if (words_scheduled==0)
421 {
422 if (attempts++==1000)
423 {
 424 				ERROR("Failed to receive data from debug handler after 1000 attempts");
425 retval=ERROR_JTAG_QUEUE_FAILED;
426 break;
427 }
428 }
429
430 words_done += words_scheduled;
431 }
432
433 for (i = 0; i < num_words; i++)
434 *(buffer++) = buf_get_u32((u8*)&field1[i], 0, 32);
435
 436 	free(field0);
 	free(field1);
437
438 return retval;
439 }
440
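/* Read the TX register (debug handler to host mailbox). Bit 0 of the captured
 * 3-bit status field is TX_READY: if 'consume' is set, the scan passes through
 * Shift-DR so the read clears TX_READY; otherwise it detours through
 * Exit1-DR/Pause-DR so the register content stays valid for a later consuming
 * read.
 */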
441 int xscale_read_tx(target_t *target, int consume)
442 {
443 armv4_5_common_t *armv4_5 = target->arch_info;
444 xscale_common_t *xscale = armv4_5->arch_info;
445 enum tap_state path[3];
446 enum tap_state noconsume_path[9];
447
448 int retval;
449 struct timeval timeout, now;
450
451 scan_field_t fields[3];
452 u8 field0_in = 0x0;
453 u8 field0_check_value = 0x2;
454 u8 field0_check_mask = 0x6;
455 u8 field2_check_value = 0x0;
456 u8 field2_check_mask = 0x1;
457
458 jtag_add_end_state(TAP_RTI);
459
460 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
461
462 path[0] = TAP_SDS;
463 path[1] = TAP_CD;
464 path[2] = TAP_SD;
465
466 noconsume_path[0] = TAP_SDS;
467 noconsume_path[1] = TAP_CD;
468 noconsume_path[2] = TAP_E1D;
469 noconsume_path[3] = TAP_PD;
470 noconsume_path[4] = TAP_E2D;
471 noconsume_path[5] = TAP_UD;
472 noconsume_path[6] = TAP_SDS;
473 noconsume_path[7] = TAP_CD;
474 noconsume_path[8] = TAP_SD;
475
476 fields[0].device = xscale->jtag_info.chain_pos;
477 fields[0].num_bits = 3;
478 fields[0].out_value = NULL;
479 fields[0].out_mask = NULL;
480 fields[0].in_value = &field0_in;
481 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
482
483 fields[1].device = xscale->jtag_info.chain_pos;
484 fields[1].num_bits = 32;
485 fields[1].out_value = NULL;
486 fields[1].out_mask = NULL;
487 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
488 fields[1].in_handler = NULL;
489 fields[1].in_handler_priv = NULL;
490 fields[1].in_check_value = NULL;
491 fields[1].in_check_mask = NULL;
492
493
494
495 fields[2].device = xscale->jtag_info.chain_pos;
496 fields[2].num_bits = 1;
497 fields[2].out_value = NULL;
498 fields[2].out_mask = NULL;
499 fields[2].in_value = NULL;
500 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
501
502 gettimeofday(&timeout, NULL);
503 timeval_add_time(&timeout, 5, 0);
504
505 do
506 {
507 /* if we want to consume the register content (i.e. clear TX_READY),
508 * we have to go straight from Capture-DR to Shift-DR
509 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
510 */
511 if (consume)
512 jtag_add_pathmove(3, path);
513 else
514 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
515
516 jtag_add_dr_scan(3, fields, TAP_RTI);
517
518 if ((retval = jtag_execute_queue()) != ERROR_OK)
519 {
520 ERROR("JTAG error while reading TX");
521 return ERROR_TARGET_TIMEOUT;
522 }
523
524 gettimeofday(&now, NULL);
525 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
526 {
527 ERROR("time out reading TX register");
528 return ERROR_TARGET_TIMEOUT;
529 }
530 } while ((!(field0_in & 1)) && consume);
531
532 if (!(field0_in & 1))
533 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
534
535 return ERROR_OK;
536 }
537
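/* Write the RX register (host to debug handler mailbox): poll until the
 * handler has drained the previous word (handshake bit 0 clear), then rescan
 * with the trailing 1-bit flag (field2) set to mark the new RX content valid.
 */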
538 int xscale_write_rx(target_t *target)
539 {
540 armv4_5_common_t *armv4_5 = target->arch_info;
541 xscale_common_t *xscale = armv4_5->arch_info;
542
543 int retval;
544 struct timeval timeout, now;
545
546 scan_field_t fields[3];
547 u8 field0_out = 0x0;
548 u8 field0_in = 0x0;
549 u8 field0_check_value = 0x2;
550 u8 field0_check_mask = 0x6;
551 u8 field2 = 0x0;
552 u8 field2_check_value = 0x0;
553 u8 field2_check_mask = 0x1;
554
555 jtag_add_end_state(TAP_RTI);
556
557 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
558
559 fields[0].device = xscale->jtag_info.chain_pos;
560 fields[0].num_bits = 3;
561 fields[0].out_value = &field0_out;
562 fields[0].out_mask = NULL;
563 fields[0].in_value = &field0_in;
564 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
565
566 fields[1].device = xscale->jtag_info.chain_pos;
567 fields[1].num_bits = 32;
568 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
569 fields[1].out_mask = NULL;
570 fields[1].in_value = NULL;
571 fields[1].in_handler = NULL;
572 fields[1].in_handler_priv = NULL;
573 fields[1].in_check_value = NULL;
574 fields[1].in_check_mask = NULL;
575
576
577
578 fields[2].device = xscale->jtag_info.chain_pos;
579 fields[2].num_bits = 1;
580 fields[2].out_value = &field2;
581 fields[2].out_mask = NULL;
582 fields[2].in_value = NULL;
583 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
584
585 gettimeofday(&timeout, NULL);
586 timeval_add_time(&timeout, 5, 0);
587
588 /* poll until rx_read is low */
589 DEBUG("polling RX");
590 do
591 {
592 jtag_add_dr_scan(3, fields, TAP_RTI);
593
594 if ((retval = jtag_execute_queue()) != ERROR_OK)
595 {
596 ERROR("JTAG error while writing RX");
597 return retval;
598 }
599
600 gettimeofday(&now, NULL);
601 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
602 {
603 ERROR("time out writing RX register");
604 return ERROR_TARGET_TIMEOUT;
605 }
606 } while (field0_in & 1);
607
608 /* set rx_valid */
609 field2 = 0x1;
610 jtag_add_dr_scan(3, fields, TAP_RTI);
611
612 if ((retval = jtag_execute_queue()) != ERROR_OK)
613 {
614 ERROR("JTAG error while writing RX");
615 return retval;
616 }
617
618 return ERROR_OK;
619 }
620
 621 /* send count elements of size bytes each to the debug handler */
622 int xscale_send(target_t *target, u8 *buffer, int count, int size)
623 {
624 armv4_5_common_t *armv4_5 = target->arch_info;
625 xscale_common_t *xscale = armv4_5->arch_info;
626
627 int retval;
628
629 int done_count = 0;
630 u8 output[4] = {0, 0, 0, 0};
631
632 scan_field_t fields[3];
633 u8 field0_out = 0x0;
634 u8 field0_check_value = 0x2;
635 u8 field0_check_mask = 0x6;
636 u8 field2 = 0x1;
637 u8 field2_check_value = 0x0;
638 u8 field2_check_mask = 0x1;
639
640 jtag_add_end_state(TAP_RTI);
641
642 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
643
644 fields[0].device = xscale->jtag_info.chain_pos;
645 fields[0].num_bits = 3;
646 fields[0].out_value = &field0_out;
647 fields[0].out_mask = NULL;
648 fields[0].in_handler = NULL;
649 if (!xscale->fast_memory_access)
650 {
651 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
652 }
653
654 fields[1].device = xscale->jtag_info.chain_pos;
655 fields[1].num_bits = 32;
656 fields[1].out_value = output;
657 fields[1].out_mask = NULL;
658 fields[1].in_value = NULL;
659 fields[1].in_handler = NULL;
660 fields[1].in_handler_priv = NULL;
661 fields[1].in_check_value = NULL;
662 fields[1].in_check_mask = NULL;
663
664
665
666 fields[2].device = xscale->jtag_info.chain_pos;
667 fields[2].num_bits = 1;
668 fields[2].out_value = &field2;
669 fields[2].out_mask = NULL;
670 fields[2].in_value = NULL;
671 fields[2].in_handler = NULL;
672 if (!xscale->fast_memory_access)
673 {
674 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
675 }
676
677 if (size==4)
678 {
679 int endianness = target->endianness;
680 while (done_count++ < count)
681 {
682 if (endianness == TARGET_LITTLE_ENDIAN)
683 {
684 output[0]=buffer[0];
685 output[1]=buffer[1];
686 output[2]=buffer[2];
687 output[3]=buffer[3];
688 } else
689 {
690 output[0]=buffer[3];
691 output[1]=buffer[2];
692 output[2]=buffer[1];
693 output[3]=buffer[0];
694 }
695 jtag_add_dr_scan(3, fields, TAP_RTI);
696 buffer += size;
697 }
698
699 } else
700 {
701 while (done_count++ < count)
702 {
703 /* extract sized element from target-endian buffer, and put it
704 * into little-endian output buffer
705 */
706 switch (size)
707 {
708 case 2:
709 buf_set_u32(output, 0, 32, target_buffer_get_u16(target, buffer));
710 break;
711 case 1:
712 output[0] = *buffer;
713 break;
714 default:
715 ERROR("BUG: size neither 4, 2 nor 1");
716 exit(-1);
717 }
718
719 jtag_add_dr_scan(3, fields, TAP_RTI);
720 buffer += size;
721 }
722
723 }
724
725 if ((retval = jtag_execute_queue()) != ERROR_OK)
726 {
727 ERROR("JTAG error while sending data to debug handler");
728 return retval;
729 }
730
731 return ERROR_OK;
732 }
733
734 int xscale_send_u32(target_t *target, u32 value)
735 {
736 armv4_5_common_t *armv4_5 = target->arch_info;
737 xscale_common_t *xscale = armv4_5->arch_info;
738
739 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
740 return xscale_write_rx(target);
741 }
742
743 int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
744 {
745 armv4_5_common_t *armv4_5 = target->arch_info;
746 xscale_common_t *xscale = armv4_5->arch_info;
747
748 int retval;
749
750 scan_field_t fields[3];
751 u8 field0 = 0x0;
752 u8 field0_check_value = 0x2;
753 u8 field0_check_mask = 0x7;
754 u8 field2 = 0x0;
755 u8 field2_check_value = 0x0;
756 u8 field2_check_mask = 0x1;
757
758 if (hold_rst != -1)
759 xscale->hold_rst = hold_rst;
760
761 if (ext_dbg_brk != -1)
762 xscale->external_debug_break = ext_dbg_brk;
763
764 jtag_add_end_state(TAP_RTI);
765 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
766
767 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
768 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
769
770 fields[0].device = xscale->jtag_info.chain_pos;
771 fields[0].num_bits = 3;
772 fields[0].out_value = &field0;
773 fields[0].out_mask = NULL;
774 fields[0].in_value = NULL;
775 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
776
777 fields[1].device = xscale->jtag_info.chain_pos;
778 fields[1].num_bits = 32;
779 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
780 fields[1].out_mask = NULL;
781 fields[1].in_value = NULL;
782 fields[1].in_handler = NULL;
783 fields[1].in_handler_priv = NULL;
784 fields[1].in_check_value = NULL;
785 fields[1].in_check_mask = NULL;
786
787
788
789 fields[2].device = xscale->jtag_info.chain_pos;
790 fields[2].num_bits = 1;
791 fields[2].out_value = &field2;
792 fields[2].out_mask = NULL;
793 fields[2].in_value = NULL;
794 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
795
796 jtag_add_dr_scan(3, fields, -1);
797
798 if ((retval = jtag_execute_queue()) != ERROR_OK)
799 {
800 ERROR("JTAG error while writing DCSR");
801 return retval;
802 }
803
804 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
805 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
806
807 return ERROR_OK;
808 }
809
 810 /* compute the parity of a 32-bit word: returns 0 if the number of set bits is even, 1 if odd */
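/* The constant 0x6996 acts as a 16-entry lookup table: bit n of 0x6996 is the
 * parity of the 4-bit value n. Worked example: parity(0xA5) folds the word
 * down to the nibble 0xf (0xA5 ^ (0xA5 >> 4) = 0xAF, masked to 0xf), and bit
 * 15 of 0x6996 is 0, matching the even number (four) of set bits in 0xA5.
 */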
811 unsigned int parity (unsigned int v)
812 {
813 unsigned int ov = v;
814 v ^= v >> 16;
815 v ^= v >> 8;
816 v ^= v >> 4;
817 v &= 0xf;
818 DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
819 return (0x6996 >> v) & 1;
820 }
821
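/* Load one line into the (mini) instruction cache via the LDIC JTAG function.
 * The first scan sends a 6-bit command (b011 for the mini IC, b010 for the
 * main IC) plus the 27-bit line address (va >> 5, i.e. 32-byte lines); eight
 * further scans each send one 32-bit word followed by its parity bit. For
 * example, va = 0xffff0000 yields the address field 0x07fff800.
 */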
822 int xscale_load_ic(target_t *target, int mini, u32 va, u32 buffer[8])
823 {
824 armv4_5_common_t *armv4_5 = target->arch_info;
825 xscale_common_t *xscale = armv4_5->arch_info;
826 u8 packet[4];
827 u8 cmd;
828 int word;
829
830 scan_field_t fields[2];
831
832 DEBUG("loading miniIC at 0x%8.8x", va);
833
834 jtag_add_end_state(TAP_RTI);
835 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
836
837 /* CMD is b010 for Main IC and b011 for Mini IC */
838 if (mini)
839 buf_set_u32(&cmd, 0, 3, 0x3);
840 else
841 buf_set_u32(&cmd, 0, 3, 0x2);
842
843 buf_set_u32(&cmd, 3, 3, 0x0);
844
845 /* virtual address of desired cache line */
846 buf_set_u32(packet, 0, 27, va >> 5);
847
848 fields[0].device = xscale->jtag_info.chain_pos;
849 fields[0].num_bits = 6;
850 fields[0].out_value = &cmd;
851 fields[0].out_mask = NULL;
852 fields[0].in_value = NULL;
853 fields[0].in_check_value = NULL;
854 fields[0].in_check_mask = NULL;
855 fields[0].in_handler = NULL;
856 fields[0].in_handler_priv = NULL;
857
858 fields[1].device = xscale->jtag_info.chain_pos;
859 fields[1].num_bits = 27;
860 fields[1].out_value = packet;
861 fields[1].out_mask = NULL;
862 fields[1].in_value = NULL;
863 fields[1].in_check_value = NULL;
864 fields[1].in_check_mask = NULL;
865 fields[1].in_handler = NULL;
866 fields[1].in_handler_priv = NULL;
867
868 jtag_add_dr_scan(2, fields, -1);
869
870 fields[0].num_bits = 32;
871 fields[0].out_value = packet;
872
873 fields[1].num_bits = 1;
874 fields[1].out_value = &cmd;
875
876 for (word = 0; word < 8; word++)
877 {
878 buf_set_u32(packet, 0, 32, buffer[word]);
879 cmd = parity(*((u32*)packet));
880 jtag_add_dr_scan(2, fields, -1);
881 }
882
883 jtag_execute_queue();
884
885 return ERROR_OK;
886 }
887
888 int xscale_invalidate_ic_line(target_t *target, u32 va)
889 {
890 armv4_5_common_t *armv4_5 = target->arch_info;
891 xscale_common_t *xscale = armv4_5->arch_info;
892 u8 packet[4];
893 u8 cmd;
894
895 scan_field_t fields[2];
896
897 jtag_add_end_state(TAP_RTI);
898 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
899
900 /* CMD for invalidate IC line b000, bits [6:4] b000 */
901 buf_set_u32(&cmd, 0, 6, 0x0);
902
903 /* virtual address of desired cache line */
904 buf_set_u32(packet, 0, 27, va >> 5);
905
906 fields[0].device = xscale->jtag_info.chain_pos;
907 fields[0].num_bits = 6;
908 fields[0].out_value = &cmd;
909 fields[0].out_mask = NULL;
910 fields[0].in_value = NULL;
911 fields[0].in_check_value = NULL;
912 fields[0].in_check_mask = NULL;
913 fields[0].in_handler = NULL;
914 fields[0].in_handler_priv = NULL;
915
916 fields[1].device = xscale->jtag_info.chain_pos;
917 fields[1].num_bits = 27;
918 fields[1].out_value = packet;
919 fields[1].out_mask = NULL;
920 fields[1].in_value = NULL;
921 fields[1].in_check_value = NULL;
922 fields[1].in_check_mask = NULL;
923 fields[1].in_handler = NULL;
924 fields[1].in_handler_priv = NULL;
925
926 jtag_add_dr_scan(2, fields, -1);
927
928 return ERROR_OK;
929 }
930
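/* Rebuild the low (0x00000000) and high (0xffff0000) exception vector tables
 * in the mini instruction cache, replacing the reset vectors with branches
 * into the debug handler (entry at handler_address + 0x20). An ARM B
 * instruction encodes a signed 24-bit word offset relative to PC + 8, hence
 * offset = (handler_address + 0x20 - vector_address - 8) >> 2 below; with a
 * hypothetical handler_address of 0xfe000800 the low reset vector gets
 * (0xfe000800 + 0x20 - 0x0 - 0x8) >> 2 = 0x3f800206, masked to 24 bits.
 */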
931 int xscale_update_vectors(target_t *target)
932 {
933 armv4_5_common_t *armv4_5 = target->arch_info;
934 xscale_common_t *xscale = armv4_5->arch_info;
935 int i;
936
937 u32 low_reset_branch, high_reset_branch;
938
939 for (i = 1; i < 8; i++)
940 {
941 /* if there's a static vector specified for this exception, override */
942 if (xscale->static_high_vectors_set & (1 << i))
943 {
944 xscale->high_vectors[i] = xscale->static_high_vectors[i];
945 }
946 else
947 {
948 if (target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]) != ERROR_OK)
949 {
950 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
951 }
952 }
953 }
954
955 for (i = 1; i < 8; i++)
956 {
957 if (xscale->static_low_vectors_set & (1 << i))
958 {
959 xscale->low_vectors[i] = xscale->static_low_vectors[i];
960 }
961 else
962 {
963 if (target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]) != ERROR_OK)
964 {
965 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
966 }
967 }
968 }
969
970 /* calculate branches to debug handler */
971 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
972 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
973
974 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
975 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
976
977 /* invalidate and load exception vectors in mini i-cache */
978 xscale_invalidate_ic_line(target, 0x0);
979 xscale_invalidate_ic_line(target, 0xffff0000);
980
981 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
982 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
983
984 return ERROR_OK;
985 }
986
987 int xscale_arch_state(struct target_s *target)
988 {
989 armv4_5_common_t *armv4_5 = target->arch_info;
990 xscale_common_t *xscale = armv4_5->arch_info;
991
992 char *state[] =
993 {
994 "disabled", "enabled"
995 };
996
997 char *arch_dbg_reason[] =
998 {
999 "", "\n(processor reset)", "\n(trace buffer full)"
1000 };
1001
1002 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
1003 {
1004 ERROR("BUG: called for a non-ARMv4/5 target");
1005 exit(-1);
1006 }
1007
1008 USER("target halted in %s state due to %s, current mode: %s\n"
1009 "cpsr: 0x%8.8x pc: 0x%8.8x\n"
1010 "MMU: %s, D-Cache: %s, I-Cache: %s"
1011 "%s",
1012 armv4_5_state_strings[armv4_5->core_state],
1013 target_debug_reason_strings[target->debug_reason],
1014 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
1015 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
1016 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
1017 state[xscale->armv4_5_mmu.mmu_enabled],
1018 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
1019 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
1020 arch_dbg_reason[xscale->arch_debug_reason]);
1021
1022 return ERROR_OK;
1023 }
1024
1025 int xscale_poll(target_t *target)
1026 {
1027 int retval=ERROR_OK;
1028 armv4_5_common_t *armv4_5 = target->arch_info;
1029 xscale_common_t *xscale = armv4_5->arch_info;
1030
1031 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
1032 {
1033 enum target_state previous_state = target->state;
1034 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
1035 {
1036
1037 /* there's data to read from the tx register, we entered debug state */
1038 xscale->handler_running = 1;
1039
1040 target->state = TARGET_HALTED;
1041
1042 /* process debug entry, fetching current mode regs */
1043 retval = xscale_debug_entry(target);
1044 }
1045 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1046 {
1047 USER("error while polling TX register, reset CPU");
 1048 			/* here we "lie" so GDB won't get stuck and a reset can be performed */
1049 target->state = TARGET_HALTED;
1050 }
1051
1052 /* debug_entry could have overwritten target state (i.e. immediate resume)
1053 * don't signal event handlers in that case
1054 */
1055 if (target->state != TARGET_HALTED)
1056 return ERROR_OK;
1057
1058 /* if target was running, signal that we halted
1059 * otherwise we reentered from debug execution */
1060 if (previous_state == TARGET_RUNNING)
1061 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1062 else
1063 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
1064 }
1065
1066 return retval;
1067 }
1068
1069 int xscale_debug_entry(target_t *target)
1070 {
1071 armv4_5_common_t *armv4_5 = target->arch_info;
1072 xscale_common_t *xscale = armv4_5->arch_info;
1073 u32 pc;
1074 u32 buffer[10];
1075 int i;
1076
1077 u32 moe;
1078
1079 /* clear external dbg break (will be written on next DCSR read) */
1080 xscale->external_debug_break = 0;
1081 xscale_read_dcsr(target);
1082
1083 /* get r0, pc, r1 to r7 and cpsr */
1084 xscale_receive(target, buffer, 10);
1085
1086 /* move r0 from buffer to register cache */
1087 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
 1088 	armv4_5->core_cache->reg_list[0].dirty = 1;
 1089 	armv4_5->core_cache->reg_list[0].valid = 1;
1090 DEBUG("r0: 0x%8.8x", buffer[0]);
1091
1092 /* move pc from buffer to register cache */
1093 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
1094 armv4_5->core_cache->reg_list[15].dirty = 1;
1095 armv4_5->core_cache->reg_list[15].valid = 1;
1096 DEBUG("pc: 0x%8.8x", buffer[1]);
1097
1098 /* move data from buffer to register cache */
1099 for (i = 1; i <= 7; i++)
1100 {
1101 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
1102 armv4_5->core_cache->reg_list[i].dirty = 1;
1103 armv4_5->core_cache->reg_list[i].valid = 1;
1104 DEBUG("r%i: 0x%8.8x", i, buffer[i + 1]);
1105 }
1106
1107 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
1108 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
1109 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
1110 DEBUG("cpsr: 0x%8.8x", buffer[9]);
1111
1112 armv4_5->core_mode = buffer[9] & 0x1f;
1113 if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
1114 {
1115 target->state = TARGET_UNKNOWN;
1116 ERROR("cpsr contains invalid mode value - communication failure");
1117 return ERROR_TARGET_FAILURE;
1118 }
1119 DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
1120
1121 if (buffer[9] & 0x20)
1122 armv4_5->core_state = ARMV4_5_STATE_THUMB;
1123 else
1124 armv4_5->core_state = ARMV4_5_STATE_ARM;
1125
1126 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1127 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
1128 {
1129 xscale_receive(target, buffer, 8);
1130 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
1131 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
1132 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
1133 }
1134 else
1135 {
1136 /* r8 to r14, but no spsr */
1137 xscale_receive(target, buffer, 7);
1138 }
1139
1140 /* move data from buffer to register cache */
1141 for (i = 8; i <= 14; i++)
1142 {
1143 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
1144 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
1145 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
1146 }
1147
1148 /* examine debug reason */
1149 xscale_read_dcsr(target);
1150 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
1151
1152 /* stored PC (for calculating fixup) */
1153 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1154
1155 switch (moe)
1156 {
1157 case 0x0: /* Processor reset */
1158 target->debug_reason = DBG_REASON_DBGRQ;
1159 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
1160 pc -= 4;
1161 break;
1162 case 0x1: /* Instruction breakpoint hit */
1163 target->debug_reason = DBG_REASON_BREAKPOINT;
1164 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1165 pc -= 4;
1166 break;
1167 case 0x2: /* Data breakpoint hit */
1168 target->debug_reason = DBG_REASON_WATCHPOINT;
1169 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1170 pc -= 4;
1171 break;
1172 case 0x3: /* BKPT instruction executed */
1173 target->debug_reason = DBG_REASON_BREAKPOINT;
1174 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1175 pc -= 4;
1176 break;
1177 case 0x4: /* Ext. debug event */
1178 target->debug_reason = DBG_REASON_DBGRQ;
1179 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1180 pc -= 4;
1181 break;
 1182 		case 0x5: /* Vector trap occurred */
1183 target->debug_reason = DBG_REASON_BREAKPOINT;
1184 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1185 pc -= 4;
1186 break;
1187 case 0x6: /* Trace buffer full break */
1188 target->debug_reason = DBG_REASON_DBGRQ;
1189 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1190 pc -= 4;
1191 break;
1192 case 0x7: /* Reserved */
1193 default:
1194 ERROR("Method of Entry is 'Reserved'");
1195 exit(-1);
1196 break;
1197 }
1198
1199 /* apply PC fixup */
1200 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
1201
1202 /* on the first debug entry, identify cache type */
1203 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1204 {
1205 u32 cache_type_reg;
1206
1207 /* read cp15 cache type register */
1208 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1209 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1210
1211 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1212 }
1213
1214 /* examine MMU and Cache settings */
1215 /* read cp15 control register */
1216 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1217 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
1218 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1219 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1220 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1221
1222 /* tracing enabled, read collected trace data */
1223 if (xscale->trace.buffer_enabled)
1224 {
1225 xscale_read_trace(target);
1226 xscale->trace.buffer_fill--;
1227
1228 /* resume if we're still collecting trace data */
1229 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1230 && (xscale->trace.buffer_fill > 0))
1231 {
1232 xscale_resume(target, 1, 0x0, 1, 0);
1233 }
1234 else
1235 {
1236 xscale->trace.buffer_enabled = 0;
1237 }
1238 }
1239
1240 return ERROR_OK;
1241 }
1242
1243 int xscale_halt(target_t *target)
1244 {
1245 armv4_5_common_t *armv4_5 = target->arch_info;
1246 xscale_common_t *xscale = armv4_5->arch_info;
1247
1248 DEBUG("target->state: %s", target_state_strings[target->state]);
1249
1250 if (target->state == TARGET_HALTED)
1251 {
1252 WARNING("target was already halted");
1253 return ERROR_TARGET_ALREADY_HALTED;
1254 }
1255 else if (target->state == TARGET_UNKNOWN)
1256 {
 1257 		/* this must not happen for an XScale target */
1258 ERROR("target was in unknown state when halt was requested");
1259 return ERROR_TARGET_INVALID;
1260 }
1261 else if (target->state == TARGET_RESET)
1262 {
1263 DEBUG("target->state == TARGET_RESET");
1264 }
1265 else
1266 {
1267 /* assert external dbg break */
1268 xscale->external_debug_break = 1;
1269 xscale_read_dcsr(target);
1270
1271 target->debug_reason = DBG_REASON_DBGRQ;
1272 }
1273
1274 return ERROR_OK;
1275 }
1276
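/* XScale has no hardware single-step. Stepping is emulated by planting an
 * instruction breakpoint (IBCR0) on the address of the next instruction, as
 * computed by arm_simulate_step(), and clearing it again after the debug
 * re-entry; if IBCR0 already holds a user breakpoint, that breakpoint is
 * temporarily removed first.
 */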
1277 int xscale_enable_single_step(struct target_s *target, u32 next_pc)
1278 {
1279 armv4_5_common_t *armv4_5 = target->arch_info;
1280 xscale_common_t *xscale= armv4_5->arch_info;
1281 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1282
1283 if (xscale->ibcr0_used)
1284 {
1285 breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1286
1287 if (ibcr0_bp)
1288 {
1289 xscale_unset_breakpoint(target, ibcr0_bp);
1290 }
1291 else
1292 {
1293 ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1294 exit(-1);
1295 }
1296 }
1297
1298 xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1299
1300 return ERROR_OK;
1301 }
1302
1303 int xscale_disable_single_step(struct target_s *target)
1304 {
1305 armv4_5_common_t *armv4_5 = target->arch_info;
1306 xscale_common_t *xscale= armv4_5->arch_info;
1307 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1308
1309 xscale_set_reg_u32(ibcr0, 0x0);
1310
1311 return ERROR_OK;
1312 }
1313
1314 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution)
1315 {
1316 armv4_5_common_t *armv4_5 = target->arch_info;
1317 xscale_common_t *xscale= armv4_5->arch_info;
1318 breakpoint_t *breakpoint = target->breakpoints;
1319
1320 u32 current_pc;
1321
1322 int retval;
1323 int i;
1324
1325 DEBUG("-");
1326
1327 if (target->state != TARGET_HALTED)
1328 {
1329 WARNING("target not halted");
1330 return ERROR_TARGET_NOT_HALTED;
1331 }
1332
1333 if (!debug_execution)
1334 {
1335 target_free_all_working_areas(target);
1336 }
1337
1338 /* update vector tables */
1339 xscale_update_vectors(target);
1340
1341 /* current = 1: continue on current pc, otherwise continue at <address> */
1342 if (!current)
1343 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1344
1345 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1346
1347 /* if we're at the reset vector, we have to simulate the branch */
1348 if (current_pc == 0x0)
1349 {
1350 arm_simulate_step(target, NULL);
1351 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1352 }
1353
1354 /* the front-end may request us not to handle breakpoints */
1355 if (handle_breakpoints)
1356 {
1357 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1358 {
1359 u32 next_pc;
1360
1361 /* there's a breakpoint at the current PC, we have to step over it */
1362 DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1363 xscale_unset_breakpoint(target, breakpoint);
1364
1365 /* calculate PC of next instruction */
1366 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1367 {
1368 u32 current_opcode;
1369 target_read_u32(target, current_pc, &current_opcode);
1370 ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1371 }
1372
1373 DEBUG("enable single-step");
1374 xscale_enable_single_step(target, next_pc);
1375
1376 /* restore banked registers */
1377 xscale_restore_context(target);
1378
1379 /* send resume request (command 0x30 or 0x31)
1380 * clean the trace buffer if it is to be enabled (0x62) */
1381 if (xscale->trace.buffer_enabled)
1382 {
1383 xscale_send_u32(target, 0x62);
1384 xscale_send_u32(target, 0x31);
1385 }
1386 else
1387 xscale_send_u32(target, 0x30);
1388
1389 /* send CPSR */
1390 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1391 DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1392
1393 for (i = 7; i >= 0; i--)
1394 {
1395 /* send register */
1396 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1397 DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1398 }
1399
1400 /* send PC */
1401 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1402 DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1403
1404 /* wait for and process debug entry */
1405 xscale_debug_entry(target);
1406
1407 DEBUG("disable single-step");
1408 xscale_disable_single_step(target);
1409
1410 DEBUG("set breakpoint at 0x%8.8x", breakpoint->address);
1411 xscale_set_breakpoint(target, breakpoint);
1412 }
1413 }
1414
1415 /* enable any pending breakpoints and watchpoints */
1416 xscale_enable_breakpoints(target);
1417 xscale_enable_watchpoints(target);
1418
1419 /* restore banked registers */
1420 xscale_restore_context(target);
1421
1422 /* send resume request (command 0x30 or 0x31)
1423 * clean the trace buffer if it is to be enabled (0x62) */
1424 if (xscale->trace.buffer_enabled)
1425 {
1426 xscale_send_u32(target, 0x62);
1427 xscale_send_u32(target, 0x31);
1428 }
1429 else
1430 xscale_send_u32(target, 0x30);
1431
1432 /* send CPSR */
1433 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1434 DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1435
1436 for (i = 7; i >= 0; i--)
1437 {
1438 /* send register */
1439 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1440 DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1441 }
1442
1443 /* send PC */
1444 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1445 DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1446
1447 target->debug_reason = DBG_REASON_NOTHALTED;
1448
1449 if (!debug_execution)
1450 {
1451 /* registers are now invalid */
1452 armv4_5_invalidate_core_regs(target);
1453 target->state = TARGET_RUNNING;
1454 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1455 }
1456 else
1457 {
1458 target->state = TARGET_DEBUG_RUNNING;
1459 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1460 }
1461
1462 DEBUG("target resumed");
1463
1464 xscale->handler_running = 1;
1465
1466 return ERROR_OK;
1467 }
1468
1469 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints)
1470 {
1471 armv4_5_common_t *armv4_5 = target->arch_info;
1472 xscale_common_t *xscale = armv4_5->arch_info;
1473 breakpoint_t *breakpoint = target->breakpoints;
1474
1475 u32 current_pc, next_pc;
1476 int i;
1477 int retval;
1478
1479 if (target->state != TARGET_HALTED)
1480 {
1481 WARNING("target not halted");
1482 return ERROR_TARGET_NOT_HALTED;
1483 }
1484
1485 /* current = 1: continue on current pc, otherwise continue at <address> */
1486 if (!current)
1487 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
1488
1489 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1490
1491 /* if we're at the reset vector, we have to simulate the step */
1492 if (current_pc == 0x0)
1493 {
1494 arm_simulate_step(target, NULL);
1495 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
1496
1497 target->debug_reason = DBG_REASON_SINGLESTEP;
1498 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1499
1500 return ERROR_OK;
1501 }
1502
1503 /* the front-end may request us not to handle breakpoints */
1504 if (handle_breakpoints)
1505 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
1506 {
1507 xscale_unset_breakpoint(target, breakpoint);
1508 }
1509
1510 target->debug_reason = DBG_REASON_SINGLESTEP;
1511
1512 /* calculate PC of next instruction */
1513 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1514 {
1515 u32 current_opcode;
1516 target_read_u32(target, current_pc, &current_opcode);
1517 ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
1518 }
1519
1520 DEBUG("enable single-step");
1521 xscale_enable_single_step(target, next_pc);
1522
1523 /* restore banked registers */
1524 xscale_restore_context(target);
1525
1526 /* send resume request (command 0x30 or 0x31)
1527 * clean the trace buffer if it is to be enabled (0x62) */
1528 if (xscale->trace.buffer_enabled)
1529 {
1530 xscale_send_u32(target, 0x62);
1531 xscale_send_u32(target, 0x31);
1532 }
1533 else
1534 xscale_send_u32(target, 0x30);
1535
1536 /* send CPSR */
1537 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1538 DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
1539
1540 for (i = 7; i >= 0; i--)
1541 {
1542 /* send register */
1543 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1544 DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1545 }
1546
1547 /* send PC */
1548 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1549 DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1550
1551 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1552
1553 /* registers are now invalid */
1554 armv4_5_invalidate_core_regs(target);
1555
1556 /* wait for and process debug entry */
1557 xscale_debug_entry(target);
1558
1559 DEBUG("disable single-step");
1560 xscale_disable_single_step(target);
1561
1562 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1563
1564 if (breakpoint)
1565 {
1566 xscale_set_breakpoint(target, breakpoint);
1567 }
1568
1569 DEBUG("target stepped");
1570
1571 return ERROR_OK;
1572
1573 }
1574
1575 int xscale_assert_reset(target_t *target)
1576 {
1577 armv4_5_common_t *armv4_5 = target->arch_info;
1578 xscale_common_t *xscale = armv4_5->arch_info;
1579
1580 DEBUG("target->state: %s", target_state_strings[target->state]);
1581
1582 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
 1583 	 * end up in T-L-R, which would reset JTAG)
1584 */
1585 jtag_add_end_state(TAP_RTI);
1586 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
1587
1588 /* set Hold reset, Halt mode and Trap Reset */
1589 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1590 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1591 xscale_write_dcsr(target, 1, 0);
1592
1593 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1594 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, 0x7f);
1595 jtag_execute_queue();
1596
1597 /* assert reset */
1598 jtag_add_reset(0, 1);
1599
1600 /* sleep 1ms, to be sure we fulfill any requirements */
1601 jtag_add_sleep(1000);
1602 jtag_execute_queue();
1603
1604 target->state = TARGET_RESET;
1605
1606 return ERROR_OK;
1607 }
1608
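/* Deassert reset and, unless it is already installed, load the debug handler:
 * xscale/debug_handler.bin is streamed into the mini instruction cache in
 * 32-byte lines via xscale_load_ic(), partial lines padded with 0xe1a08008
 * ("mov r8, r8", effectively a NOP). Lines at the vector locations are
 * skipped here and loaded separately from low_vectors/high_vectors; clearing
 * "hold reset" afterwards lets the core run straight into the handler.
 */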
1609 int xscale_deassert_reset(target_t *target)
1610 {
1611 armv4_5_common_t *armv4_5 = target->arch_info;
1612 xscale_common_t *xscale = armv4_5->arch_info;
1613
1614 fileio_t debug_handler;
1615 u32 address;
1616 u32 binary_size;
1617
1618 u32 buf_cnt;
1619 int i;
1620 int retval;
1621
1622 breakpoint_t *breakpoint = target->breakpoints;
1623
1624 DEBUG("-");
1625
1626 xscale->ibcr_available = 2;
1627 xscale->ibcr0_used = 0;
1628 xscale->ibcr1_used = 0;
1629
1630 xscale->dbr_available = 2;
1631 xscale->dbr0_used = 0;
1632 xscale->dbr1_used = 0;
1633
1634 /* mark all hardware breakpoints as unset */
1635 while (breakpoint)
1636 {
1637 if (breakpoint->type == BKPT_HARD)
1638 {
1639 breakpoint->set = 0;
1640 }
1641 breakpoint = breakpoint->next;
1642 }
1643
1644 if (!xscale->handler_installed)
1645 {
1646 /* release SRST */
1647 jtag_add_reset(0, 0);
1648
1649 /* wait 300ms; 150 and 100ms were not enough */
1650 jtag_add_sleep(300*1000);
1651
1652 jtag_add_runtest(2030, TAP_RTI);
1653 jtag_execute_queue();
1654
1655 /* set Hold reset, Halt mode and Trap Reset */
1656 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1657 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1658 xscale_write_dcsr(target, 1, 0);
1659
1660 /* Load debug handler */
1661 if (fileio_open(&debug_handler, "xscale/debug_handler.bin", FILEIO_READ, FILEIO_BINARY) != ERROR_OK)
1662 {
1663 return ERROR_OK;
1664 }
1665
1666 if ((binary_size = debug_handler.size) % 4)
1667 {
1668 ERROR("debug_handler.bin: size not a multiple of 4");
1669 exit(-1);
1670 }
1671
1672 if (binary_size > 0x800)
1673 {
1674 ERROR("debug_handler.bin: larger than 2kb");
1675 exit(-1);
1676 }
1677
1678 binary_size = CEIL(binary_size, 32) * 32;
1679
1680 address = xscale->handler_address;
1681 while (binary_size > 0)
1682 {
1683 u32 cache_line[8];
1684 u8 buffer[32];
1685
1686 if ((retval = fileio_read(&debug_handler, 32, buffer, &buf_cnt)) != ERROR_OK)
1687 {
 1688 				ERROR("reading debug handler failed");
1689 }
1690
1691 for (i = 0; i < buf_cnt; i += 4)
1692 {
1693 /* convert LE buffer to host-endian u32 */
1694 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1695 }
1696
1697 for (; buf_cnt < 32; buf_cnt += 4)
1698 {
1699 cache_line[buf_cnt / 4] = 0xe1a08008;
1700 }
1701
1702 /* only load addresses other than the reset vectors */
1703 if ((address % 0x400) != 0x0)
1704 {
1705 xscale_load_ic(target, 1, address, cache_line);
1706 }
1707
1708 address += buf_cnt;
1709 binary_size -= buf_cnt;
 1710 		}
1711
1712 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
1713 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
1714
1715 jtag_add_runtest(30, TAP_RTI);
1716
1717 jtag_add_sleep(100000);
1718
1719 /* set Hold reset, Halt mode and Trap Reset */
1720 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1721 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1722 xscale_write_dcsr(target, 1, 0);
1723
1724 /* clear Hold reset to let the target run (should enter debug handler) */
1725 xscale_write_dcsr(target, 0, 1);
1726 target->state = TARGET_RUNNING;
1727
1728 if ((target->reset_mode != RESET_HALT) && (target->reset_mode != RESET_INIT))
1729 {
1730 jtag_add_sleep(10000);
1731
1732 /* we should have entered debug now */
1733 xscale_debug_entry(target);
1734 target->state = TARGET_HALTED;
1735
1736 /* resume the target */
1737 xscale_resume(target, 1, 0x0, 1, 0);
1738 }
1739
1740 fileio_close(&debug_handler);
1741 }
1742 else
1743 {
1744 jtag_add_reset(0, 0);
1745 }
1746
1747
1748 return ERROR_OK;
1749 }
1750
1751 int xscale_soft_reset_halt(struct target_s *target)
1752 {
1753
1754 return ERROR_OK;
1755 }
1756
1757 int xscale_prepare_reset_halt(struct target_s *target)
1758 {
1759 /* nothing to be done for reset_halt on XScale targets
1760 * we always halt after a reset to upload the debug handler
1761 */
1762 return ERROR_OK;
1763 }
1764
1765 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode)
1766 {
1767
1768 return ERROR_OK;
1769 }
1770
1771 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value)
1772 {
1773
1774 return ERROR_OK;
1775 }
1776
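/* Fetch the banked registers of every privileged mode. The protocol with the
 * debug handler, as used below: command 0x0 requests banked registers, the
 * next word is a CPSR image selecting the mode (with the I/F bits set), and
 * the handler replies with r8-r14 plus the SPSR for modes that have one.
 * xscale_restore_context() uses the mirror image of this with command 0x1.
 */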
1777 int xscale_full_context(target_t *target)
1778 {
1779 armv4_5_common_t *armv4_5 = target->arch_info;
1780
1781 u32 *buffer;
1782
1783 int i, j;
1784
1785 DEBUG("-");
1786
1787 if (target->state != TARGET_HALTED)
1788 {
1789 WARNING("target not halted");
1790 return ERROR_TARGET_NOT_HALTED;
1791 }
1792
1793 buffer = malloc(4 * 8);
1794
1795 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1796 * we can't enter User mode on an XScale (unpredictable),
1797 * but User shares registers with SYS
1798 */
1799 for(i = 1; i < 7; i++)
1800 {
1801 int valid = 1;
1802
1803 /* check if there are invalid registers in the current mode
1804 */
1805 for (j = 0; j <= 16; j++)
1806 {
1807 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
1808 valid = 0;
1809 }
1810
1811 if (!valid)
1812 {
1813 u32 tmp_cpsr;
1814
1815 /* request banked registers */
1816 xscale_send_u32(target, 0x0);
1817
1818 tmp_cpsr = 0x0;
1819 tmp_cpsr |= armv4_5_number_to_mode(i);
1820 tmp_cpsr |= 0xc0; /* I/F bits */
1821
1822 /* send CPSR for desired mode */
1823 xscale_send_u32(target, tmp_cpsr);
1824
1825 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1826 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1827 {
1828 xscale_receive(target, buffer, 8);
 1829 				buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
1830 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1831 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
1832 }
1833 else
1834 {
1835 xscale_receive(target, buffer, 7);
1836 }
1837
1838 /* move data from buffer to register cache */
1839 for (j = 8; j <= 14; j++)
1840 {
1841 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
1842 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1843 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
1844 }
1845 }
1846 }
1847
1848 free(buffer);
1849
1850 return ERROR_OK;
1851 }
1852
1853 int xscale_restore_context(target_t *target)
1854 {
1855 armv4_5_common_t *armv4_5 = target->arch_info;
1856
1857 int i, j;
1858
1859 DEBUG("-");
1860
1861 if (target->state != TARGET_HALTED)
1862 {
1863 WARNING("target not halted");
1864 return ERROR_TARGET_NOT_HALTED;
1865 }
1866
1867 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1868 * we can't enter User mode on an XScale (unpredictable),
1869 * but User shares registers with SYS
1870 */
1871 for(i = 1; i < 7; i++)
1872 {
1873 int dirty = 0;
1874
 1875 		/* check if there are dirty registers in the current mode
1876 */
1877 for (j = 8; j <= 14; j++)
1878 {
1879 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
1880 dirty = 1;
1881 }
1882
1883 /* if not USR/SYS, check if the SPSR needs to be written */
1884 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1885 {
1886 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
1887 dirty = 1;
1888 }
1889
1890 if (dirty)
1891 {
1892 u32 tmp_cpsr;
1893
1894 /* send banked registers */
1895 xscale_send_u32(target, 0x1);
1896
1897 tmp_cpsr = 0x0;
1898 tmp_cpsr |= armv4_5_number_to_mode(i);
1899 tmp_cpsr |= 0xc0; /* I/F bits */
1900
1901 /* send CPSR for desired mode */
1902 xscale_send_u32(target, tmp_cpsr);
1903
1904 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
1905 for (j = 8; j <= 14; j++)
1906 {
1907 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
1908 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
1909 }
1910
1911 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
1912 {
1913 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
1914 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
1915 }
1916 }
1917 }
1918
1919 return ERROR_OK;
1920 }
1921
1922 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1923 {
1924 armv4_5_common_t *armv4_5 = target->arch_info;
1925 xscale_common_t *xscale = armv4_5->arch_info;
1926 u32 *buf32;
1927 int i;
1928
1929 DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
1930
1931 if (target->state != TARGET_HALTED)
1932 {
1933 WARNING("target not halted");
1934 return ERROR_TARGET_NOT_HALTED;
1935 }
1936
1937 /* sanitize arguments */
1938 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1939 return ERROR_INVALID_ARGUMENTS;
1940
1941 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1942 return ERROR_TARGET_UNALIGNED_ACCESS;
1943
1944 /* send memory read request (command 0x1n, n: access size) */
1945 xscale_send_u32(target, 0x10 | size);
1946
1947 /* send base address for read request */
1948 xscale_send_u32(target, address);
1949
1950 /* send number of requested data words */
1951 xscale_send_u32(target, count);
1952
1953 /* receive data from target (count times 32-bit words in host endianness) */
1954 buf32 = malloc(4 * count);
1955 xscale_receive(target, buf32, count);
1956
1957 /* extract data from host-endian buffer into byte stream */
1958 for (i = 0; i < count; i++)
1959 {
1960 switch (size)
1961 {
1962 case 4:
1963 target_buffer_set_u32(target, buffer, buf32[i]);
1964 buffer += 4;
1965 break;
1966 case 2:
1967 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1968 buffer += 2;
1969 break;
1970 case 1:
1971 *buffer++ = buf32[i] & 0xff;
1972 break;
1973 default:
1974 ERROR("should never get here");
1975 exit(-1);
1976 }
1977 }
1978
1979 free(buf32);
1980
1981 /* examine DCSR, to see if Sticky Abort (SA) got set */
1982 xscale_read_dcsr(target);
1983 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
1984 {
1985 /* clear SA bit */
1986 xscale_send_u32(target, 0x60);
1987
1988 return ERROR_TARGET_DATA_ABORT;
1989 }
1990
1991 return ERROR_OK;
1992 }
1993
1994 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
1995 {
1996 armv4_5_common_t *armv4_5 = target->arch_info;
1997 xscale_common_t *xscale = armv4_5->arch_info;
1998
1999 DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
2000
2001 if (target->state != TARGET_HALTED)
2002 {
2003 WARNING("target not halted");
2004 return ERROR_TARGET_NOT_HALTED;
2005 }
2006
2007 /* sanitize arguments */
2008 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
2009 return ERROR_INVALID_ARGUMENTS;
2010
2011 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2012 return ERROR_TARGET_UNALIGNED_ACCESS;
2013
2014 /* send memory write request (command 0x2n, n: access size) */
2015 xscale_send_u32(target, 0x20 | size);
2016
2017 /* send base address for write request */
2018 xscale_send_u32(target, address);
2019
2020 /* send number of data words to be written */
2021 xscale_send_u32(target, count);
2022
2023 /* send data, converting the byte stream (target endianness) into words */
2024 #if 0
2025 for (i = 0; i < count; i++)
2026 {
2027 switch (size)
2028 {
2029 case 4:
2030 value = target_buffer_get_u32(target, buffer);
2031 xscale_send_u32(target, value);
2032 buffer += 4;
2033 break;
2034 case 2:
2035 value = target_buffer_get_u16(target, buffer);
2036 xscale_send_u32(target, value);
2037 buffer += 2;
2038 break;
2039 case 1:
2040 value = *buffer;
2041 xscale_send_u32(target, value);
2042 buffer += 1;
2043 break;
2044 default:
2045 ERROR("should never get here");
2046 exit(-1);
2047 }
2048 }
2049 #endif
2050 xscale_send(target, buffer, count, size);
2051
2052 /* examine DCSR, to see if Sticky Abort (SA) got set */
2053 xscale_read_dcsr(target);
2054 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
2055 {
2056 /* clear SA bit */
2057 xscale_send_u32(target, 0x60);
2058
2059 return ERROR_TARGET_DATA_ABORT;
2060 }
2061
2062 return ERROR_OK;
2063 }
2064
2065 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer)
2066 {
2067 return xscale_write_memory(target, address, 4, count, buffer);
2068 }
2069
2070 int xscale_checksum_memory(struct target_s *target, u32 address, u32 count, u32* checksum)
2071 {
2072 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2073 }
2074
2075 u32 xscale_get_ttb(target_t *target)
2076 {
2077 armv4_5_common_t *armv4_5 = target->arch_info;
2078 xscale_common_t *xscale = armv4_5->arch_info;
2079 u32 ttb;
2080
2081 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2082 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
2083
2084 return ttb;
2085 }
2086
2087 void xscale_disable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2088 {
2089 armv4_5_common_t *armv4_5 = target->arch_info;
2090 xscale_common_t *xscale = armv4_5->arch_info;
2091 u32 cp15_control;
2092
2093 /* read cp15 control register */
2094 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2095 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2096
2097 if (mmu)
2098 cp15_control &= ~0x1U;
2099
2100 if (d_u_cache)
2101 {
2102 /* clean DCache */
2103 xscale_send_u32(target, 0x50);
2104 xscale_send_u32(target, xscale->cache_clean_address);
2105
2106 /* invalidate DCache */
2107 xscale_send_u32(target, 0x51);
2108
2109 cp15_control &= ~0x4U;
2110 }
2111
2112 if (i_cache)
2113 {
2114 /* invalidate ICache */
2115 xscale_send_u32(target, 0x52);
2116 cp15_control &= ~0x1000U;
2117 }
2118
2119 /* write new cp15 control register */
2120 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2121
2122 /* execute cpwait to ensure outstanding operations complete */
2123 xscale_send_u32(target, 0x53);
2124 }
2125
2126 void xscale_enable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
2127 {
2128 armv4_5_common_t *armv4_5 = target->arch_info;
2129 xscale_common_t *xscale = armv4_5->arch_info;
2130 u32 cp15_control;
2131
2132 /* read cp15 control register */
2133 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2134 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2135
2136 if (mmu)
2137 cp15_control |= 0x1U;
2138
2139 if (d_u_cache)
2140 cp15_control |= 0x4U;
2141
2142 if (i_cache)
2143 cp15_control |= 0x1000U;
2144
2145 /* write new cp15 control register */
2146 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2147
2148 /* execute cpwait to ensure outstanding operations complete */
2149 xscale_send_u32(target, 0x53);
2150 }
2151
2152 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2153 {
2154 armv4_5_common_t *armv4_5 = target->arch_info;
2155 xscale_common_t *xscale = armv4_5->arch_info;
2156
2157 if (target->state != TARGET_HALTED)
2158 {
2159 WARNING("target not halted");
2160 return ERROR_TARGET_NOT_HALTED;
2161 }
2162
2163 if (xscale->force_hw_bkpts)
2164 breakpoint->type = BKPT_HARD;
2165
2166 if (breakpoint->set)
2167 {
2168 WARNING("breakpoint already set");
2169 return ERROR_OK;
2170 }
2171
2172 if (breakpoint->type == BKPT_HARD)
2173 {
2174 u32 value = breakpoint->address | 1;
2175 if (!xscale->ibcr0_used)
2176 {
2177 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2178 xscale->ibcr0_used = 1;
2179 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2180 }
2181 else if (!xscale->ibcr1_used)
2182 {
2183 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2184 xscale->ibcr1_used = 1;
2185 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2186 }
2187 else
2188 {
2189 ERROR("BUG: no hardware comparator available");
2190 return ERROR_OK;
2191 }
2192 }
2193 else if (breakpoint->type == BKPT_SOFT)
2194 {
2195 if (breakpoint->length == 4)
2196 {
2197 /* keep the original instruction in target endianness */
2198 target->type->read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2199 /* write ARM breakpoint instruction (xscale->arm_bkpt is host endian) */
2200 target_write_u32(target, breakpoint->address, xscale->arm_bkpt);
2201 }
2202 else
2203 {
2204 /* keep the original instruction in target endianness */
2205 target->type->read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2206 /* write Thumb breakpoint instruction (xscale->thumb_bkpt is host endian); write only 16 bits to avoid clobbering the following halfword */
2207 target_write_u16(target, breakpoint->address, xscale->thumb_bkpt);
2208 }
2209 breakpoint->set = 1;
2210 }
2211
2212 return ERROR_OK;
2213
2214 }
2215
2216 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2217 {
2218 armv4_5_common_t *armv4_5 = target->arch_info;
2219 xscale_common_t *xscale = armv4_5->arch_info;
2220
2221 if (target->state != TARGET_HALTED)
2222 {
2223 WARNING("target not halted");
2224 return ERROR_TARGET_NOT_HALTED;
2225 }
2226
2227 if (xscale->force_hw_bkpts)
2228 {
2229 DEBUG("forcing use of hardware breakpoint at address 0x%8.8x", breakpoint->address);
2230 breakpoint->type = BKPT_HARD;
2231 }
2232
2233 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2234 {
2235 INFO("no breakpoint unit available for hardware breakpoint");
2236 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2237 }
2238 else if (breakpoint->type == BKPT_HARD)
2239 {
2240 xscale->ibcr_available--;
2241 }
2242
2243 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2244 {
2245 INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2246 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2247 }
2248
2249 return ERROR_OK;
2250 }
2251
2252 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2253 {
2254 armv4_5_common_t *armv4_5 = target->arch_info;
2255 xscale_common_t *xscale = armv4_5->arch_info;
2256
2257 if (target->state != TARGET_HALTED)
2258 {
2259 WARNING("target not halted");
2260 return ERROR_TARGET_NOT_HALTED;
2261 }
2262
2263 if (!breakpoint->set)
2264 {
2265 WARNING("breakpoint not set");
2266 return ERROR_OK;
2267 }
2268
2269 if (breakpoint->type == BKPT_HARD)
2270 {
2271 if (breakpoint->set == 1)
2272 {
2273 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2274 xscale->ibcr0_used = 0;
2275 }
2276 else if (breakpoint->set == 2)
2277 {
2278 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2279 xscale->ibcr1_used = 0;
2280 }
2281 breakpoint->set = 0;
2282 }
2283 else
2284 {
2285 /* restore original instruction (kept in target endianness) */
2286 if (breakpoint->length == 4)
2287 {
2288 target->type->write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
2289 }
2290 else
2291 {
2292 target->type->write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
2293 }
2294 breakpoint->set = 0;
2295 }
2296
2297 return ERROR_OK;
2298 }
2299
2300 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
2301 {
2302 armv4_5_common_t *armv4_5 = target->arch_info;
2303 xscale_common_t *xscale = armv4_5->arch_info;
2304
2305 if (target->state != TARGET_HALTED)
2306 {
2307 WARNING("target not halted");
2308 return ERROR_TARGET_NOT_HALTED;
2309 }
2310
2311 if (breakpoint->set)
2312 {
2313 xscale_unset_breakpoint(target, breakpoint);
2314 }
2315
2316 if (breakpoint->type == BKPT_HARD)
2317 xscale->ibcr_available++;
2318
2319 return ERROR_OK;
2320 }
2321
2322 int xscale_set_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2323 {
2324 armv4_5_common_t *armv4_5 = target->arch_info;
2325 xscale_common_t *xscale = armv4_5->arch_info;
2326 u8 enable=0;
2327 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2328 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2329
2330 if (target->state != TARGET_HALTED)
2331 {
2332 WARNING("target not halted");
2333 return ERROR_TARGET_NOT_HALTED;
2334 }
2335
2336 xscale_get_reg(dbcon);
dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2337
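/* DBCON enable fields (per the XScale debug architecture): E0 = bits [1:0]
 * controls DBR0, E1 = bits [3:2] controls DBR1; 0b01 = store only,
 * 0b10 = any access, 0b11 = load only, 0b00 = disabled
 */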
2338 switch (watchpoint->rw)
2339 {
2340 case WPT_READ:
2341 enable = 0x3;
2342 break;
2343 case WPT_ACCESS:
2344 enable = 0x2;
2345 break;
2346 case WPT_WRITE:
2347 enable = 0x1;
2348 break;
2349 default:
2350 ERROR("BUG: watchpoint->rw neither read, write nor access");
2351 }
2352
2353 if (!xscale->dbr0_used)
2354 {
2355 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2356 dbcon_value |= enable;
2357 xscale_set_reg_u32(dbcon, dbcon_value);
2358 watchpoint->set = 1;
2359 xscale->dbr0_used = 1;
2360 }
2361 else if (!xscale->dbr1_used)
2362 {
2363 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2364 dbcon_value |= enable << 2;
2365 xscale_set_reg_u32(dbcon, dbcon_value);
2366 watchpoint->set = 2;
2367 xscale->dbr1_used = 1;
2368 }
2369 else
2370 {
2371 ERROR("BUG: no hardware comparator available");
2372 return ERROR_OK;
2373 }
2374
2375 return ERROR_OK;
2376 }
2377
2378 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2379 {
2380 armv4_5_common_t *armv4_5 = target->arch_info;
2381 xscale_common_t *xscale = armv4_5->arch_info;
2382
2383 if (target->state != TARGET_HALTED)
2384 {
2385 WARNING("target not halted");
2386 return ERROR_TARGET_NOT_HALTED;
2387 }
2388
2389 if (xscale->dbr_available < 1)
2390 {
2391 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2392 }
2393
2394 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2395 {
2396 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2397 }
2398
2399 xscale->dbr_available--;
2400
2401 return ERROR_OK;
2402 }
2403
2404 int xscale_unset_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2405 {
2406 armv4_5_common_t *armv4_5 = target->arch_info;
2407 xscale_common_t *xscale = armv4_5->arch_info;
2408 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2409 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2410
2411 if (target->state != TARGET_HALTED)
2412 {
2413 WARNING("target not halted");
2414 return ERROR_TARGET_NOT_HALTED;
2415 }
2416
2417 if (!watchpoint->set)
2418 {
2419 WARNING("watchpoint not set");
2420 return ERROR_OK;
2421 }
2422
2423 if (watchpoint->set == 1)
2424 {
2425 dbcon_value &= ~0x3;
2426 xscale_set_reg_u32(dbcon, dbcon_value);
2427 xscale->dbr0_used = 0;
2428 }
2429 else if (watchpoint->set == 2)
2430 {
2431 dbcon_value &= ~0xc;
2432 xscale_set_reg_u32(dbcon, dbcon_value);
2433 xscale->dbr1_used = 0;
2434 }
2435 watchpoint->set = 0;
2436
2437 return ERROR_OK;
2438 }
2439
2440 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
2441 {
2442 armv4_5_common_t *armv4_5 = target->arch_info;
2443 xscale_common_t *xscale = armv4_5->arch_info;
2444
2445 if (target->state != TARGET_HALTED)
2446 {
2447 WARNING("target not halted");
2448 return ERROR_TARGET_NOT_HALTED;
2449 }
2450
2451 if (watchpoint->set)
2452 {
2453 xscale_unset_watchpoint(target, watchpoint);
2454 }
2455
2456 xscale->dbr_available++;
2457
2458 return ERROR_OK;
2459 }
2460
2461 void xscale_enable_watchpoints(struct target_s *target)
2462 {
2463 watchpoint_t *watchpoint = target->watchpoints;
2464
2465 while (watchpoint)
2466 {
2467 if (watchpoint->set == 0)
2468 xscale_set_watchpoint(target, watchpoint);
2469 watchpoint = watchpoint->next;
2470 }
2471 }
2472
2473 void xscale_enable_breakpoints(struct target_s *target)
2474 {
2475 breakpoint_t *breakpoint = target->breakpoints;
2476
2477 /* set any pending breakpoints */
2478 while (breakpoint)
2479 {
2480 if (breakpoint->set == 0)
2481 xscale_set_breakpoint(target, breakpoint);
2482 breakpoint = breakpoint->next;
2483 }
2484 }
2485
2486 int xscale_get_reg(reg_t *reg)
2487 {
2488 xscale_reg_t *arch_info = reg->arch_info;
2489 target_t *target = arch_info->target;
2490 armv4_5_common_t *armv4_5 = target->arch_info;
2491 xscale_common_t *xscale = armv4_5->arch_info;
2492
2493 /* DCSR, TX and RX are accessible via JTAG */
2494 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2495 {
2496 return xscale_read_dcsr(arch_info->target);
2497 }
2498 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2499 {
2500 /* 1 = consume register content */
2501 return xscale_read_tx(arch_info->target, 1);
2502 }
2503 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2504 {
2505 /* can't read from RX register (host -> debug handler) */
2506 return ERROR_OK;
2507 }
2508 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2509 {
2510 /* can't (explicitly) read from TXRXCTRL register */
2511 return ERROR_OK;
2512 }
2513 else /* Other DBG registers have to be transferred by the debug handler */
2514 {
2515 /* send CP read request (command 0x40) */
2516 xscale_send_u32(target, 0x40);
2517
2518 /* send CP register number */
2519 xscale_send_u32(target, arch_info->dbg_handler_number);
2520
2521 /* read register value */
2522 xscale_read_tx(target, 1);
2523 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2524
2525 reg->dirty = 0;
2526 reg->valid = 1;
2527 }
2528
2529 return ERROR_OK;
2530 }
2531
2532 int xscale_set_reg(reg_t *reg, u8* buf)
2533 {
2534 xscale_reg_t *arch_info = reg->arch_info;
2535 target_t *target = arch_info->target;
2536 armv4_5_common_t *armv4_5 = target->arch_info;
2537 xscale_common_t *xscale = armv4_5->arch_info;
2538 u32 value = buf_get_u32(buf, 0, 32);
2539
2540 /* DCSR, TX and RX are accessible via JTAG */
2541 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2542 {
2543 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2544 return xscale_write_dcsr(arch_info->target, -1, -1);
2545 }
2546 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2547 {
2548 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2549 return xscale_write_rx(arch_info->target);
2550 }
2551 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2552 {
2553 /* can't write to TX register (debug-handler -> host) */
2554 return ERROR_OK;
2555 }
2556 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2557 {
2558 /* can't (explicitly) write to TXRXCTRL register */
2559 return ERROR_OK;
2560 }
2561 else /* Other DBG registers have to be transferred by the debug handler */
2562 {
2563 /* send CP write request (command 0x41) */
2564 xscale_send_u32(target, 0x41);
2565
2566 /* send CP register number */
2567 xscale_send_u32(target, arch_info->dbg_handler_number);
2568
2569 /* send CP register value */
2570 xscale_send_u32(target, value);
2571 buf_set_u32(reg->value, 0, 32, value);
2572 }
2573
2574 return ERROR_OK;
2575 }
2576
2577 /* convenience wrapper to access XScale specific registers */
2578 int xscale_set_reg_u32(reg_t *reg, u32 value)
2579 {
2580 u8 buf[4];
2581
2582 buf_set_u32(buf, 0, 32, value);
2583
2584 return xscale_set_reg(reg, buf);
2585 }
2586
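/* write DCSR via the debug handler (command 0x41, coprocessor register write)
 * instead of directly through JTAG; used by the trace buffer command below
 */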
2587 int xscale_write_dcsr_sw(target_t *target, u32 value)
2588 {
2589 /* get pointers to arch-specific information */
2590 armv4_5_common_t *armv4_5 = target->arch_info;
2591 xscale_common_t *xscale = armv4_5->arch_info;
2592 reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2593 xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
2594
2595 /* send CP write request (command 0x41) */
2596 xscale_send_u32(target, 0x41);
2597
2598 /* send CP register number */
2599 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2600
2601 /* send CP register value */
2602 xscale_send_u32(target, value);
2603 buf_set_u32(dcsr->value, 0, 32, value);
2604
2605 return ERROR_OK;
2606 }
2607
2608 int xscale_read_trace(target_t *target)
2609 {
2610 /* get pointers to arch-specific information */
2611 armv4_5_common_t *armv4_5 = target->arch_info;
2612 xscale_common_t *xscale = armv4_5->arch_info;
2613 xscale_trace_data_t **trace_data_p;
2614
2615 /* 258 words from debug handler
2616 * 256 trace buffer entries
2617 * 2 checkpoint addresses
2618 */
2619 u32 trace_buffer[258];
2620 int is_address[256];
2621 int i, j;
2622
2623 if (target->state != TARGET_HALTED)
2624 {
2625 WARNING("target must be stopped to read trace data");
2626 return ERROR_TARGET_NOT_HALTED;
2627 }
2628
2629 /* send read trace buffer command (command 0x61) */
2630 xscale_send_u32(target, 0x61);
2631
2632 /* receive trace buffer content */
2633 xscale_receive(target, trace_buffer, 258);
2634
2635 /* parse buffer backwards to identify address entries */
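/* each trace buffer entry is one byte: the upper nibble is the message type,
 * the lower nibble an incremental instruction count; (checkpointed) indirect
 * branch messages (0x9n/0xdn) are preceded by four bytes holding the branch
 * target address, which are flagged here so they aren't parsed as messages
 */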
2636 for (i = 255; i >= 0; i--)
2637 {
2638 is_address[i] = 0;
2639 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2640 ((trace_buffer[i] & 0xf0) == 0xd0))
2641 {
2642 if (i >= 3)
2643 is_address[--i] = 1;
2644 if (i >= 2)
2645 is_address[--i] = 1;
2646 if (i >= 1)
2647 is_address[--i] = 1;
2648 if (i >= 0)
2649 is_address[--i] = 1;
2650 }
2651 }
2652
2653
2654 /* search first non-zero entry */
2655 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2656 ;
2657
2658 if (j == 256)
2659 {
2660 DEBUG("no trace data collected");
2661 return ERROR_XSCALE_NO_TRACE_DATA;
2662 }
2663
2664 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2665 ;
2666
2667 *trace_data_p = malloc(sizeof(xscale_trace_data_t));
2668 (*trace_data_p)->next = NULL;
2669 (*trace_data_p)->chkpt0 = trace_buffer[256];
2670 (*trace_data_p)->chkpt1 = trace_buffer[257];
2671 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
2672 (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
2673 (*trace_data_p)->depth = 256 - j;
2674
2675 for (i = j; i < 256; i++)
2676 {
2677 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2678 if (is_address[i])
2679 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2680 else
2681 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2682 }
2683
2684 return ERROR_OK;
2685 }
2686
2687 int xscale_read_instruction(target_t *target, arm_instruction_t *instruction)
2688 {
2689 /* get pointers to arch-specific information */
2690 armv4_5_common_t *armv4_5 = target->arch_info;
2691 xscale_common_t *xscale = armv4_5->arch_info;
2692 int i;
2693 int section = -1;
2694 u32 size_read;
2695 u32 opcode;
2696 int retval;
2697
2698 if (!xscale->trace.image)
2699 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2700
2701 /* search for the section the current instruction belongs to */
2702 for (i = 0; i < xscale->trace.image->num_sections; i++)
2703 {
2704 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2705 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2706 {
2707 section = i;
2708 break;
2709 }
2710 }
2711
2712 if (section == -1)
2713 {
2714 /* current instruction couldn't be found in the image */
2715 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2716 }
2717
2718 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
2719 {
2720 u8 buf[4];
2721 if ((retval = image_read_section(xscale->trace.image, section,
2722 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2723 4, buf, &size_read)) != ERROR_OK)
2724 {
2725 ERROR("error while reading instruction: %i", retval);
2726 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2727 }
2728 opcode = target_buffer_get_u32(target, buf);
2729 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2730 }
2731 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
2732 {
2733 u8 buf[2];
2734 if ((retval = image_read_section(xscale->trace.image, section,
2735 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2736 2, buf, &size_read)) != ERROR_OK)
2737 {
2738 ERROR("error while reading instruction: %i", retval);
2739 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2740 }
2741 opcode = target_buffer_get_u16(target, buf);
2742 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2743 }
2744 else
2745 {
2746 ERROR("BUG: unknown core state encountered");
2747 exit(-1);
2748 }
2749
2750 return ERROR_OK;
2751 }
2752
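/* reassemble the 32-bit branch target address from the four trace buffer
 * entries preceding an indirect branch message (entry i-1 holds the least
 * significant byte)
 */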
2753 int xscale_branch_address(xscale_trace_data_t *trace_data, int i, u32 *target)
2754 {
2755 /* if there are less than four entries prior to the indirect branch message
2756 * we can't extract the address */
2757 if (i < 4)
2758 {
2759 return -1;
2760 }
2761
2762 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2763 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2764
2765 return 0;
2766 }
2767
2768 int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
2769 {
2770 /* get pointers to arch-specific information */
2771 armv4_5_common_t *armv4_5 = target->arch_info;
2772 xscale_common_t *xscale = armv4_5->arch_info;
2773 int next_pc_ok = 0;
2774 u32 next_pc = 0x0;
2775 xscale_trace_data_t *trace_data = xscale->trace.data;
2776 int retval;
2777
2778 while (trace_data)
2779 {
2780 int i, chkpt;
2781 int rollover;
2782 int branch;
2783 int exception;
2784 xscale->trace.core_state = ARMV4_5_STATE_ARM;
2785
2786 chkpt = 0;
2787 rollover = 0;
2788
2789 for (i = 0; i < trace_data->depth; i++)
2790 {
2791 next_pc_ok = 0;
2792 branch = 0;
2793 exception = 0;
2794
2795 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2796 continue;
2797
2798 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2799 {
2800 case 0: /* Exceptions */
2801 case 1:
2802 case 2:
2803 case 3:
2804 case 4:
2805 case 5:
2806 case 6:
2807 case 7:
2808 exception = (trace_data->entries[i].data & 0x70) >> 4;
2809 next_pc_ok = 1;
2810 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2811 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2812 break;
2813 case 8: /* Direct Branch */
2814 branch = 1;
2815 break;
2816 case 9: /* Indirect Branch */
2817 branch = 1;
2818 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2819 {
2820 next_pc_ok = 1;
2821 }
2822 break;
2823 case 13: /* Checkpointed Indirect Branch */
2824 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2825 {
2826 next_pc_ok = 1;
2827 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2828 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2829 WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2830 }
2831 /* explicit fall-through */
2832 case 12: /* Checkpointed Direct Branch */
2833 branch = 1;
2834 if (chkpt == 0)
2835 {
2836 next_pc_ok = 1;
2837 next_pc = trace_data->chkpt0;
2838 chkpt++;
2839 }
2840 else if (chkpt == 1)
2841 {
2842 next_pc_ok = 1;
2843 next_pc = trace_data->chkpt1;
2844 chkpt++;
2845 }
2846 else
2847 {
2848 WARNING("more than two checkpointed branches encountered");
2849 }
2850 break;
2851 case 15: /* Roll-over */
2852 rollover++;
2853 continue;
2854 default: /* Reserved */
2855 command_print(cmd_ctx, "--- reserved trace message ---");
2856 ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2857 return ERROR_OK;
2858 }
2859
2860 if (xscale->trace.pc_ok)
2861 {
2862 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2863 arm_instruction_t instruction;
2864
2865 if ((exception == 6) || (exception == 7))
2866 {
2867 /* IRQ or FIQ exception, no instruction executed */
2868 executed -= 1;
2869 }
2870
2871 while (executed-- >= 0)
2872 {
2873 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2874 {
2875 /* can't continue tracing with no image available */
2876 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2877 {
2878 return retval;
2879 }
2880 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2881 {
2882 /* TODO: handle incomplete images */
2883 }
2884 }
2885
2886 /* a precise abort on a load to the PC is included in the incremental
2887 * word count, other instructions causing data aborts are not included
2888 */
2889 if ((executed == 0) && (exception == 4)
2890 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2891 {
2892 if ((instruction.type == ARM_LDM)
2893 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2894 {
2895 executed--;
2896 }
2897 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2898 && (instruction.info.load_store.Rd != 15))
2899 {
2900 executed--;
2901 }
2902 }
2903
2904 /* only the last instruction executed
2905 * (the one that caused the control flow change)
2906 * could be a taken branch
2907 */
2908 if (((executed == -1) && (branch == 1)) &&
2909 (((instruction.type == ARM_B) ||
2910 (instruction.type == ARM_BL) ||
2911 (instruction.type == ARM_BLX)) &&
2912 (instruction.info.b_bl_bx_blx.target_address != -1)))
2913 {
2914 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2915 }
2916 else
2917 {
2918 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
2919 }
2920 command_print(cmd_ctx, "%s", instruction.text);
2921 }
2922
2923 rollover = 0;
2924 }
2925
2926 if (next_pc_ok)
2927 {
2928 xscale->trace.current_pc = next_pc;
2929 xscale->trace.pc_ok = 1;
2930 }
2931 }
2932
2933 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
2934 {
2935 arm_instruction_t instruction;
2936 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2937 {
2938 /* can't continue tracing with no image available */
2939 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2940 {
2941 return retval;
2942 }
2943 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2944 {
2945 /* TODO: handle incomplete images */
2946 }
2947 }
2948 command_print(cmd_ctx, "%s", instruction.text);
2949 }
2950
2951 trace_data = trace_data->next;
2952 }
2953
2954 return ERROR_OK;
2955 }
2956
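/* build two register caches: the common ARMv4/5 core registers, followed by
 * a second cache holding the XScale debug registers (DCSR, TX/RX, comparator
 * and control registers) accessed via JTAG or the debug handler
 */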
2957 void xscale_build_reg_cache(target_t *target)
2958 {
2959 /* get pointers to arch-specific information */
2960 armv4_5_common_t *armv4_5 = target->arch_info;
2961 xscale_common_t *xscale = armv4_5->arch_info;
2962
2963 reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
2964 xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
2965 int i;
2966 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);
2967
2968 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
2969 armv4_5->core_cache = (*cache_p);
2970
2971 /* register a register arch-type for XScale dbg registers only once */
2972 if (xscale_reg_arch_type == -1)
2973 xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
2974
2975 (*cache_p)->next = malloc(sizeof(reg_cache_t));
2976 cache_p = &(*cache_p)->next;
2977
2978 /* fill in values for the xscale reg cache */
2979 (*cache_p)->name = "XScale registers";
2980 (*cache_p)->next = NULL;
2981 (*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
2982 (*cache_p)->num_regs = num_regs;
2983
2984 for (i = 0; i < num_regs; i++)
2985 {
2986 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2987 (*cache_p)->reg_list[i].value = calloc(4, 1);
2988 (*cache_p)->reg_list[i].dirty = 0;
2989 (*cache_p)->reg_list[i].valid = 0;
2990 (*cache_p)->reg_list[i].size = 32;
2991 (*cache_p)->reg_list[i].bitfield_desc = NULL;
2992 (*cache_p)->reg_list[i].num_bitfields = 0;
2993 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2994 (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
2995 arch_info[i] = xscale_reg_arch_info[i];
2996 arch_info[i].target = target;
2997 }
2998
2999 xscale->reg_cache = (*cache_p);
3000 }
3001
3002 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target)
3003 {
3004 if (startup_mode != DAEMON_RESET)
3005 {
3006 ERROR("XScale target requires a reset");
3007 ERROR("Reset target to enable debug");
3008 }
3009
3010 /* assert TRST once during startup */
3011 jtag_add_reset(1, 0);
3012 jtag_add_sleep(5000);
3013 jtag_add_reset(0, 0);
3014 jtag_execute_queue();
3015
3016 return ERROR_OK;
3017 }
3018
3019 int xscale_quit()
3020 {
3021
3022 return ERROR_OK;
3023 }
3024
3025 int xscale_init_arch_info(target_t *target, xscale_common_t *xscale, int chain_pos, char *variant)
3026 {
3027 armv4_5_common_t *armv4_5;
3028 u32 high_reset_branch, low_reset_branch;
3029 int i;
3030
3031 armv4_5 = &xscale->armv4_5_common;
3032
3033 /* store architecture specific data (none so far) */
3034 xscale->arch_info = NULL;
3035 xscale->common_magic = XSCALE_COMMON_MAGIC;
3036
3037 /* remember the variant (PXA25x, PXA27x, IXP42x, ...) */
3038 xscale->variant = strdup(variant);
3039
3040 /* prepare JTAG information for the new target */
3041 xscale->jtag_info.chain_pos = chain_pos;
3042 jtag_register_event_callback(xscale_jtag_callback, target);
3043
3044 xscale->jtag_info.dbgrx = 0x02;
3045 xscale->jtag_info.dbgtx = 0x10;
3046 xscale->jtag_info.dcsr = 0x09;
3047 xscale->jtag_info.ldic = 0x07;
3048
3049 if ((strcmp(xscale->variant, "pxa250") == 0) ||
3050 (strcmp(xscale->variant, "pxa255") == 0) ||
3051 (strcmp(xscale->variant, "pxa26x") == 0))
3052 {
3053 xscale->jtag_info.ir_length = 5;
3054 }
3055 else if ((strcmp(xscale->variant, "pxa27x") == 0) ||
3056 (strcmp(xscale->variant, "ixp42x") == 0) ||
3057 (strcmp(xscale->variant, "ixp45x") == 0) ||
3058 (strcmp(xscale->variant, "ixp46x") == 0))
3059 {
3060 xscale->jtag_info.ir_length = 7;
3061 }
3062
3063 /* the debug handler isn't installed (and thus not running) at this time */
3064 xscale->handler_installed = 0;
3065 xscale->handler_running = 0;
3066 xscale->handler_address = 0xfe000800;
3067
3068 /* clear the vectors we keep locally for reference */
3069 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
3070 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
3071
3072 /* no user-specified vectors have been configured yet */
3073 xscale->static_low_vectors_set = 0x0;
3074 xscale->static_high_vectors_set = 0x0;
3075
3076 /* calculate branches to debug handler */
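/* ARM B encoding: offset = (target - (vector address + 8)) >> 2, accounting
 * for the two-instruction pipeline prefetch; the reset vector (at 0x0 or
 * 0xffff0000) branches to handler_address + 0x20
 */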
3077 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
3078 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
3079
3080 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
3081 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
3082
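/* all other vectors get a branch-to-self (offset -2 words resolves to the
 * vector's own address), so unexpected exceptions spin in place
 */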
3083 for (i = 1; i <= 7; i++)
3084 {
3085 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3086 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
3087 }
3088
3089 /* 64kB aligned region used for DCache cleaning */
3090 xscale->cache_clean_address = 0xfffe0000;
3091
3092 xscale->hold_rst = 0;
3093 xscale->external_debug_break = 0;
3094
3095 xscale->force_hw_bkpts = 1;
3096
3097 xscale->ibcr_available = 2;
3098 xscale->ibcr0_used = 0;
3099 xscale->ibcr1_used = 0;
3100
3101 xscale->dbr_available = 2;
3102 xscale->dbr0_used = 0;
3103 xscale->dbr1_used = 0;
3104
3105 xscale->arm_bkpt = ARMV5_BKPT(0x0);
3106 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
3107
3108 xscale->vector_catch = 0x1;
3109
3110 xscale->trace.capture_status = TRACE_IDLE;
3111 xscale->trace.data = NULL;
3112 xscale->trace.image = NULL;
3113 xscale->trace.buffer_enabled = 0;
3114 xscale->trace.buffer_fill = 0;
3115
3116 /* prepare ARMv4/5 specific information */
3117 armv4_5->arch_info = xscale;
3118 armv4_5->read_core_reg = xscale_read_core_reg;
3119 armv4_5->write_core_reg = xscale_write_core_reg;
3120 armv4_5->full_context = xscale_full_context;
3121
3122 armv4_5_init_arch_info(target, armv4_5);
3123
3124 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3125 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3126 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3127 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3128 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3129 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3130 xscale->armv4_5_mmu.has_tiny_pages = 1;
3131 xscale->armv4_5_mmu.mmu_enabled = 0;
3132
3133 xscale->fast_memory_access = 0;
3134
3135 return ERROR_OK;
3136 }
3137
3138 /* target xscale <endianness> <startup_mode> <chain_pos> <variant> */
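/* a hypothetical config line might look like "target xscale little reset_halt 0 pxa255";
 * the accepted <startup_mode> tokens are defined by the daemon configuration, not here
 */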
3139 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target)
3140 {
3141 int chain_pos;
3142 char *variant = NULL;
3143 xscale_common_t *xscale = malloc(sizeof(xscale_common_t));
3144
3145 if (argc < 5)
3146 {
3147 ERROR("'target xscale' requires four arguments: <endianness> <startup_mode> <chain_pos> <variant>");
3148 return ERROR_OK;
3149 }
3150
3151 chain_pos = strtoul(args[3], NULL, 0);
3152
3153 variant = args[4];
3154
3155 xscale_init_arch_info(target, xscale, chain_pos, variant);
3156 xscale_build_reg_cache(target);
3157
3158 return ERROR_OK;
3159 }
3160
3161 int xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3162 {
3163 target_t *target = NULL;
3164 armv4_5_common_t *armv4_5;
3165 xscale_common_t *xscale;
3166
3167 u32 handler_address;
3168
3169 if (argc < 2)
3170 {
3171 ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3172 return ERROR_OK;
3173 }
3174
3175 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3176 {
3177 ERROR("no target '%s' configured", args[0]);
3178 return ERROR_OK;
3179 }
3180
3181 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3182 {
3183 return ERROR_OK;
3184 }
3185
3186 handler_address = strtoul(args[1], NULL, 0);
3187
3188 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3189 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3190 {
3191 xscale->handler_address = handler_address;
3192 }
3193 else
3194 {
3195 ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3196 }
3197
3198 return ERROR_OK;
3199 }
3200
3201 int xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3202 {
3203 target_t *target = NULL;
3204 armv4_5_common_t *armv4_5;
3205 xscale_common_t *xscale;
3206
3207 u32 cache_clean_address;
3208
3209 if (argc < 2)
3210 {
3211 ERROR("'xscale cache_clean_address <target#> <address>' command takes two required operands");
3212 return ERROR_OK;
3213 }
3214
3215 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
3216 {
3217 ERROR("no target '%s' configured", args[0]);
3218 return ERROR_OK;
3219 }
3220
3221 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3222 {
3223 return ERROR_OK;
3224 }
3225
3226 cache_clean_address = strtoul(args[1], NULL, 0);
3227
3228 if (cache_clean_address & 0xffff)
3229 {
3230 ERROR("xscale cache_clean_address <address> must be 64kB aligned");
3231 }
3232 else
3233 {
3234 xscale->cache_clean_address = cache_clean_address;
3235 }
3236
3237 return ERROR_OK;
3238 }
3239
3240 int xscale_handle_cache_info_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3241 {
3242 target_t *target = get_current_target(cmd_ctx);
3243 armv4_5_common_t *armv4_5;
3244 xscale_common_t *xscale;
3245
3246 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3247 {
3248 return ERROR_OK;
3249 }
3250
3251 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
3252 }
3253
3254 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical)
3255 {
3256 armv4_5_common_t *armv4_5;
3257 xscale_common_t *xscale;
3258 int retval;
3259 int type;
3260 u32 cb;
3261 int domain;
3262 u32 ap;
3263
3264
3265 if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)
3266 {
3267 return retval;
3268 }
3269 u32 ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3270 if (type == -1)
3271 {
3272 return ret;
3273 }
3274 *physical = ret;
3275 return ERROR_OK;
3276 }
3277
3278 static int xscale_mmu(struct target_s *target, int *enabled)
3279 {
3280 armv4_5_common_t *armv4_5 = target->arch_info;
3281 xscale_common_t *xscale = armv4_5->arch_info;
3282
3283 if (target->state != TARGET_HALTED)
3284 {
3285 ERROR("Target not halted");
3286 return ERROR_TARGET_INVALID;
3287 }
3288 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3289 return ERROR_OK;
3290 }
3291
3292
3293 int xscale_handle_mmu_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3294 {
3295 target_t *target = get_current_target(cmd_ctx);
3296 armv4_5_common_t *armv4_5;
3297 xscale_common_t *xscale;
3298
3299 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3300 {
3301 return ERROR_OK;
3302 }
3303
3304 if (target->state != TARGET_HALTED)
3305 {
3306 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3307 return ERROR_OK;
3308 }
3309
3310 if (argc >= 1)
3311 {
3312 if (strcmp("enable", args[0]) == 0)
3313 {
3314 xscale_enable_mmu_caches(target, 1, 0, 0);
3315 xscale->armv4_5_mmu.mmu_enabled = 1;
3316 }
3317 else if (strcmp("disable", args[0]) == 0)
3318 {
3319 xscale_disable_mmu_caches(target, 1, 0, 0);
3320 xscale->armv4_5_mmu.mmu_enabled = 0;
3321 }
3322 }
3323
3324 command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3325
3326 return ERROR_OK;
3327 }
3328
3329 int xscale_handle_idcache_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3330 {
3331 target_t *target = get_current_target(cmd_ctx);
3332 armv4_5_common_t *armv4_5;
3333 xscale_common_t *xscale;
3334 int icache = 0, dcache = 0;
3335
3336 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3337 {
3338 return ERROR_OK;
3339 }
3340
3341 if (target->state != TARGET_HALTED)
3342 {
3343 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3344 return ERROR_OK;
3345 }
3346
3347 if (strcmp(cmd, "icache") == 0)
3348 icache = 1;
3349 else if (strcmp(cmd, "dcache") == 0)
3350 dcache = 1;
3351
3352 if (argc >= 1)
3353 {
3354 if (strcmp("enable", args[0]) == 0)
3355 {
3356 xscale_enable_mmu_caches(target, 0, dcache, icache);
3357
3358 if (icache)
3359 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
3360 else if (dcache)
3361 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
3362 }
3363 else if (strcmp("disable", args[0]) == 0)
3364 {
3365 xscale_disable_mmu_caches(target, 0, dcache, icache);
3366
3367 if (icache)
3368 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
3369 else if (dcache)
3370 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
3371 }
3372 }
3373
3374 if (icache)
3375 command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
3376
3377 if (dcache)
3378 command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
3379
3380 return ERROR_OK;
3381 }
3382
3383 int xscale_handle_vector_catch_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
3384 {
3385 target_t *target = get_current_target(cmd_ctx);
3386 armv4_5_common_t *armv4_5;
3387 xscale_common_t *xscale;
3388
3389 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3390 {
3391 return ERROR_OK;
3392 }
3393
3394 if (argc < 1)
3395 {
3396 command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
3397 }
3398 else
3399 {
3400 xscale->vector_catch = strtoul(args[0], NULL, 0);
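/* the vector catch mask is stored in an 8-bit field at DCSR bits [23:16]
 * before DCSR is written back
 */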
3401 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3402 xscale_write_dcsr(target, -1, -1);
3403 }
3404
3405 command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3406
3407 return ERROR_OK;
3408 }
3409
3410 int xscale_handle_force_hw_bkpts_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3411 {
3412 target_t *target = get_current_target(cmd_ctx);
3413 armv4_5_common_t *armv4_5;
3414 xscale_common_t *xscale;
3415
3416 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3417 {
3418 return ERROR_OK;
3419 }
3420
3421 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3422 {
3423 xscale->force_hw_bkpts = 1;
3424 }
3425 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3426 {
3427 xscale->force_hw_bkpts = 0;
3428 }
3429 else
3430 {
3431 command_print(cmd_ctx, "usage: xscale force_hw_bkpts <enable|disable>");
3432 }
3433
3434 command_print(cmd_ctx, "force hardware breakpoints %s", (xscale->force_hw_bkpts) ? "enabled" : "disabled");
3435
3436 return ERROR_OK;
3437 }
3438
3439 int xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3440 {
3441 target_t *target = get_current_target(cmd_ctx);
3442 armv4_5_common_t *armv4_5;
3443 xscale_common_t *xscale;
3444 u32 dcsr_value;
3445
3446 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3447 {
3448 return ERROR_OK;
3449 }
3450
3451 if (target->state != TARGET_HALTED)
3452 {
3453 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
3454 return ERROR_OK;
3455 }
3456
3457 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
3458 {
3459 xscale_trace_data_t *td, *next_td;
3460 xscale->trace.buffer_enabled = 1;
3461
3462 /* free old trace data */
3463 td = xscale->trace.data;
3464 while (td)
3465 {
3466 next_td = td->next;
3467
3468 if (td->entries)
3469 free(td->entries);
3470 free(td);
3471 td = next_td;
3472 }
3473 xscale->trace.data = NULL;
3474 }
3475 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
3476 {
3477 xscale->trace.buffer_enabled = 0;
3478 }
3479
3480 if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
3481 {
3482 if (argc >= 3)
3483 xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);
3484 else
3485 xscale->trace.buffer_fill = 1;
3486 }
3487 else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
3488 {
3489 xscale->trace.buffer_fill = -1;
3490 }
3491
3492 if (xscale->trace.buffer_enabled)
3493 {
3494 /* if we enable the trace buffer in fill-once
3495 * mode we know the address of the first instruction */
3496 xscale->trace.pc_ok = 1;
3497 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
3498 }
3499 else
3500 {
3501 /* otherwise the address is unknown, and we have no known good PC */
3502 xscale->trace.pc_ok = 0;
3503 }
3504
3505 command_print(cmd_ctx, "trace buffer %s (%s)",
3506 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3507 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3508
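/* trace buffer mode lives in the low DCSR bits: set bit 1 for fill-once
 * mode, clear bits [1:0] for wrap mode (buffer_fill < 0)
 */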
3509 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3510 if (xscale->trace.buffer_fill >= 0)
3511 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3512 else
3513 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3514
3515 return ERROR_OK;
3516 }
3517
3518 int xscale_handle_trace_image_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
3519 {
3520 target_t *target;
3521 armv4_5_common_t *armv4_5;
3522 xscale_common_t *xscale;
3523
3524 if (argc < 1)
3525 {
3526 command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
3527 return ERROR_OK;
3528 }
3529
3530 target = get_current_target(cmd_ctx);
3531
3532 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
3533 {
3534 return ERROR_OK;
3535 }
3536
3537 if (xscale->trace.image)
3538 {
3539 image_close(xscale->trace.image);
3540 free(xscale->trace.image);
3541 command_print(cmd_ctx, "previously loaded image found and closed");
3542 }
3543
3544 xscale->trace.image = malloc(sizeof(image_t));
3545 xscale->trace.image->base_address_set = 0;
3546 xscale->trace.image->start_address_set = 0;
3547
3548 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3549 if (argc >= 2)
3550 {
3551 xscale->trace.image->base_address_set = 1;
3552 xscale->trace.image->base_address = strtoul(args[1], NULL, 0);
3553 }
3554 else
3555 {
3556 xscale->trace.image->base_address_set = 0;
3557 }
3558
3559 if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
3560 {
3561 free(xscale->trace.image);
3562 xscale->trace.image = NULL;
3563 return ERROR_OK;
3564 }
3565
3566 return ERROR_OK;