armv7a: fix interpretation of MMU table
[openocd.git] / src / target / armv7a.c
1 /***************************************************************************
2 * Copyright (C) 2009 by David Brownell *
3 * *
4 * Copyright (C) ST-Ericsson SA 2011 michel.jaouen@stericsson.com *
5 * *
6 * This program is free software; you can redistribute it and/or modify *
7 * it under the terms of the GNU General Public License as published by *
8 * the Free Software Foundation; either version 2 of the License, or *
9 * (at your option) any later version. *
10 * *
11 * This program is distributed in the hope that it will be useful, *
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
14 * GNU General Public License for more details. *
15 * *
16 * You should have received a copy of the GNU General Public License *
17 * along with this program; if not, write to the *
18 * Free Software Foundation, Inc., *
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
20 ***************************************************************************/
21
22 #ifdef HAVE_CONFIG_H
23 #include "config.h"
24 #endif
25
26 #include <helper/replacements.h>
27
28 #include "armv7a.h"
29 #include "arm_disassembler.h"
30
31 #include "register.h"
32 #include <helper/binarybuffer.h>
33 #include <helper/command.h>
34
35 #include <stdlib.h>
36 #include <string.h>
37 #include <unistd.h>
38
39 #include "arm_opcodes.h"
40 #include "target.h"
41 #include "target_type.h"
42
43 static void armv7a_show_fault_registers(struct target *target)
44 {
45 uint32_t dfsr, ifsr, dfar, ifar;
46 struct armv7a_common *armv7a = target_to_armv7a(target);
47 struct arm_dpm *dpm = armv7a->arm.dpm;
48 int retval;
49
50 retval = dpm->prepare(dpm);
51 if (retval != ERROR_OK)
52 return;
53
54 /* ARMV4_5_MRC(cpnum, op1, r0, CRn, CRm, op2) */
55
56 /* c5/c0 - {data, instruction} fault status registers */
57 retval = dpm->instr_read_data_r0(dpm,
58 ARMV4_5_MRC(15, 0, 0, 5, 0, 0),
59 &dfsr);
60 if (retval != ERROR_OK)
61 goto done;
62
63 retval = dpm->instr_read_data_r0(dpm,
64 ARMV4_5_MRC(15, 0, 0, 5, 0, 1),
65 &ifsr);
66 if (retval != ERROR_OK)
67 goto done;
68
69 /* c6/c0 - {data, instruction} fault address registers */
70 retval = dpm->instr_read_data_r0(dpm,
71 ARMV4_5_MRC(15, 0, 0, 6, 0, 0),
72 &dfar);
73 if (retval != ERROR_OK)
74 goto done;
75
76 retval = dpm->instr_read_data_r0(dpm,
77 ARMV4_5_MRC(15, 0, 0, 6, 0, 2),
78 &ifar);
79 if (retval != ERROR_OK)
80 goto done;
81
82 LOG_USER("Data fault registers DFSR: %8.8" PRIx32
83 ", DFAR: %8.8" PRIx32, dfsr, dfar);
84 LOG_USER("Instruction fault registers IFSR: %8.8" PRIx32
85 ", IFAR: %8.8" PRIx32, ifsr, ifar);
86
87 done:
88 /* (void) */ dpm->finish(dpm);
89 }
90
91
92 /* retrieve main id register */
93 static int armv7a_read_midr(struct target *target)
94 {
95 int retval = ERROR_FAIL;
96 struct armv7a_common *armv7a = target_to_armv7a(target);
97 struct arm_dpm *dpm = armv7a->arm.dpm;
98 uint32_t midr;
99 retval = dpm->prepare(dpm);
100 if (retval != ERROR_OK)
101 goto done;
102 /* MRC p15,0,<Rd>,c0,c0,0; read main id register*/
103
104 retval = dpm->instr_read_data_r0(dpm,
105 ARMV4_5_MRC(15, 0, 0, 0, 0, 0),
106 &midr);
107 if (retval != ERROR_OK)
108 goto done;
109
110 armv7a->rev = (midr & 0xf);
111 armv7a->partnum = (midr >> 4) & 0xfff;
112 armv7a->arch = (midr >> 16) & 0xf;
113 armv7a->variant = (midr >> 20) & 0xf;
114 armv7a->implementor = (midr >> 24) & 0xff;
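	/* MIDR layout (ARMv7): [31:24] implementer, [23:20] variant,
	 * [19:16] architecture, [15:4] primary part number, [3:0] revision.
	 * For example, a Cortex-A9 r2p10 would typically read back as
	 * implementer 0x41 (ARM), partnum 0xc09, variant 0x2, rev 0xa
	 * (illustrative values, not taken from a specific target). */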
115 LOG_INFO("%s rev %" PRIx32 ", partnum %" PRIx32 ", arch %" PRIx32
116 ", variant %" PRIx32 ", implementor %" PRIx32,
117 target->cmd_name,
118 armv7a->rev,
119 armv7a->partnum,
120 armv7a->arch,
121 armv7a->variant,
122 armv7a->implementor);
123
124 done:
125 dpm->finish(dpm);
126 return retval;
127 }
128
129 static int armv7a_read_ttbcr(struct target *target)
130 {
131 struct armv7a_common *armv7a = target_to_armv7a(target);
132 struct arm_dpm *dpm = armv7a->arm.dpm;
133 uint32_t ttbcr;
134 uint32_t ttbr0, ttbr1;
135 int retval = dpm->prepare(dpm);
136 if (retval != ERROR_OK)
137 goto done;
138 /* MRC p15,0,<Rt>,c2,c0,2 ; Read CP15 Translation Table Base Control Register*/
139 retval = dpm->instr_read_data_r0(dpm,
140 ARMV4_5_MRC(15, 0, 0, 2, 0, 2),
141 &ttbcr);
142 if (retval != ERROR_OK)
143 goto done;
144
145 retval = dpm->instr_read_data_r0(dpm,
146 ARMV4_5_MRC(15, 0, 0, 2, 0, 0),
147 &ttbr0);
148 if (retval != ERROR_OK)
149 goto done;
150
151 retval = dpm->instr_read_data_r0(dpm,
152 ARMV4_5_MRC(15, 0, 0, 2, 0, 1),
153 &ttbr1);
154 if (retval != ERROR_OK)
155 goto done;
156
157 LOG_INFO("ttbcr %" PRIx32 " ttbr0 %" PRIx32 " ttbr1 %" PRIx32, ttbcr, ttbr0, ttbr1);
158
159 armv7a->armv7a_mmu.ttbr1_used = ((ttbcr & 0x7) != 0) ? 1 : 0;
160 armv7a->armv7a_mmu.ttbr0_mask = 0;
161
162 retval = armv7a_read_midr(target);
163 if (retval != ERROR_OK)
164 goto done;
165
166 if (armv7a->partnum & 0xf) {
167 /*
168 * ARM Architecture Reference Manual (ARMv7-A and ARMv7-R edition),
169 * document # ARM DDI 0406C
170 */
171 armv7a->armv7a_mmu.ttbr0_mask = 1 << (14 - ((ttbcr & 0x7)));
172 } else {
173 /* ARM DDI 0344H , ARM DDI 0407F */
174 armv7a->armv7a_mmu.ttbr0_mask = 7 << (32 - ((ttbcr & 0x7)));
175 /* FIXME: default is the hard-coded Linux kernel/user border */
176 armv7a->armv7a_mmu.os_border = 0xc0000000;
177 }
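	/* Background on the TTBCR.N split (ARMv7-A short-descriptor format):
	 * N = TTBCR[2:0]. With N == 0 only TTBR0 is used; with N > 0, TTBR1
	 * covers the upper part of the address space and the TTBR0 table
	 * shrinks to 2^(14 - N) bytes. As an illustrative example, N = 2
	 * gives a 4 KB TTBR0 table mapping VAs below 0x40000000, while TTBR1
	 * maps everything at or above that boundary. */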
178
179 LOG_DEBUG("ttbr1 %s, ttbr0_mask %" PRIx32,
180 armv7a->armv7a_mmu.ttbr1_used ? "used" : "not used",
181 armv7a->armv7a_mmu.ttbr0_mask);
182
183 if (armv7a->armv7a_mmu.ttbr1_used == 1) {
184 LOG_INFO("SVC access above %" PRIx32,
185 (0xffffffff & armv7a->armv7a_mmu.ttbr0_mask));
186 armv7a->armv7a_mmu.os_border = 0xffffffff & armv7a->armv7a_mmu.ttbr0_mask;
187 }
188 done:
189 dpm->finish(dpm);
190 return retval;
191 }
192
193 /* method adapted to Cortex-A: reuses the ARMv4/v5 approach */
194 int armv7a_mmu_translate_va(struct target *target, uint32_t va, uint32_t *val)
195 {
196 uint32_t first_lvl_descriptor = 0x0;
197 uint32_t second_lvl_descriptor = 0x0;
198 int retval;
199 struct armv7a_common *armv7a = target_to_armv7a(target);
200 struct arm_dpm *dpm = armv7a->arm.dpm;
201 uint32_t ttb = 0; /* default ttb0 */
202 if (armv7a->armv7a_mmu.ttbr1_used == -1)
203 armv7a_read_ttbcr(target);
204 if ((armv7a->armv7a_mmu.ttbr1_used) &&
205 (va > (0xffffffff & armv7a->armv7a_mmu.ttbr0_mask))) {
206 /* select ttb 1 */
207 ttb = 1;
208 }
209 retval = dpm->prepare(dpm);
210 if (retval != ERROR_OK)
211 goto done;
212
213 /* MRC p15,0,<Rt>,c2,c0,ttb */
214 retval = dpm->instr_read_data_r0(dpm,
215 ARMV4_5_MRC(15, 0, 0, 2, 0, ttb),
216 &ttb);
217 if (retval != ERROR_OK)
218 return retval;
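	/* First-level lookup (as implemented here, assuming a 16 KB-aligned
	 * translation table): the entry index is VA[31:20], so the descriptor
	 * address is (ttb & 0xffffc000) | (VA[31:20] << 2). Illustrative
	 * example: VA 0x80123456 selects index 0x801, i.e. byte offset 0x2004
	 * into the table. */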
219 retval = armv7a->armv7a_mmu.read_physical_memory(target,
220 (ttb & 0xffffc000) | ((va & 0xfff00000) >> 18),
221 4, 1, (uint8_t *)&first_lvl_descriptor);
222 if (retval != ERROR_OK)
223 return retval;
224 first_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
225 &first_lvl_descriptor);
226 /* reuses the armv4_5 code; armv7a-specific changes may come later */
227 LOG_DEBUG("1st lvl desc: %8.8" PRIx32 "", first_lvl_descriptor);
228
229 if ((first_lvl_descriptor & 0x3) == 0) {
230 LOG_ERROR("Address translation failure");
231 return ERROR_TARGET_TRANSLATION_FAULT;
232 }
233
234
235 if ((first_lvl_descriptor & 0x40002) == 2) {
236 /* section descriptor */
237 *val = (first_lvl_descriptor & 0xfff00000) | (va & 0x000fffff);
238 return ERROR_OK;
239 } else if ((first_lvl_descriptor & 0x40002) == 0x40002) {
240 /* supersection descriptor */
241 if (first_lvl_descriptor & 0x00f001e0) {
242 LOG_ERROR("Physical address does not fit into 32 bits");
243 return ERROR_TARGET_TRANSLATION_FAULT;
244 }
245 *val = (first_lvl_descriptor & 0xff000000) | (va & 0x00ffffff);
246 return ERROR_OK;
247 }
248
249 /* page table */
250 retval = armv7a->armv7a_mmu.read_physical_memory(target,
251 (first_lvl_descriptor & 0xfffffc00) | ((va & 0x000ff000) >> 10),
252 4, 1, (uint8_t *)&second_lvl_descriptor);
253 if (retval != ERROR_OK)
254 return retval;
255
256 second_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
257 &second_lvl_descriptor);
258
259 LOG_DEBUG("2nd lvl desc: %8.8" PRIx32 "", second_lvl_descriptor);
260
261 if ((second_lvl_descriptor & 0x3) == 0) {
262 LOG_ERROR("Address translation failure");
263 return ERROR_TARGET_TRANSLATION_FAULT;
264 }
265
266 if ((second_lvl_descriptor & 0x3) == 1) {
267 /* large page descriptor */
268 *val = (second_lvl_descriptor & 0xffff0000) | (va & 0x0000ffff);
269 } else {
270 /* small page descriptor */
271 *val = (second_lvl_descriptor & 0xfffff000) | (va & 0x00000fff);
272 }
273
274 return ERROR_OK;
275
276 done:
277 return retval;
278 }
279
280 /* V7 method VA TO PA */
281 int armv7a_mmu_translate_va_pa(struct target *target, uint32_t va,
282 uint32_t *val, int meminfo)
283 {
284 int retval = ERROR_FAIL;
285 struct armv7a_common *armv7a = target_to_armv7a(target);
286 struct arm_dpm *dpm = armv7a->arm.dpm;
287 uint32_t virt = va & ~0xfff;
288 uint32_t NOS, NS, INNER, OUTER;
289 *val = 0xdeadbeef;
290 retval = dpm->prepare(dpm);
291 if (retval != ERROR_OK)
292 goto done;
293 /* the MMU must be enabled in order to get a correct translation;
294 * use the VA-to-PA CP15 operation for the conversion */
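	/* This relies on the CP15 address translation operations: writing the
	 * VA to ATS1CPR (MCR p15, 0, <Rt>, c7, c8, 0) performs a stage-1
	 * privileged-read translation, and the result is read back from the
	 * PAR (MRC p15, 0, <Rt>, c7, c4, 0). On success PAR[0] is 0 and
	 * PAR[31:12] holds the physical page address. */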
295 retval = dpm->instr_write_data_r0(dpm,
296 ARMV4_5_MCR(15, 0, 0, 7, 8, 0),
297 virt);
298 if (retval != ERROR_OK)
299 goto done;
300 retval = dpm->instr_read_data_r0(dpm,
301 ARMV4_5_MRC(15, 0, 0, 7, 4, 0),
302 val);
303 /* decode memory attribute */
304 NOS = (*val >> 10) & 1; /* Not Outer shareable */
305 NS = (*val >> 9) & 1; /* Non secure */
306 INNER = (*val >> 4) & 0x7;
307 OUTER = (*val >> 2) & 0x3;
308
309 if (retval != ERROR_OK)
310 goto done;
311 *val = (*val & ~0xfff) + (va & 0xfff);
312 if (*val == va)
313 LOG_WARNING("virt = phys: MMU is disabled");
314 if (meminfo) {
315 LOG_INFO("%" PRIx32 " : %" PRIx32 " %s outer shareable %s secured",
316 va, *val,
317 NOS == 1 ? "not" : " ",
318 NS == 1 ? "not" : "");
319 switch (OUTER) {
320 case 0:
321 LOG_INFO("outer: Non-Cacheable");
322 break;
323 case 1:
324 LOG_INFO("outer: Write-Back, Write-Allocate");
325 break;
326 case 2:
327 LOG_INFO("outer: Write-Through, No Write-Allocate");
328 break;
329 case 3:
330 LOG_INFO("outer: Write-Back, no Write-Allocate");
331 break;
332 }
333 switch (INNER) {
334 case 0:
335 LOG_INFO("inner: Non-Cacheable");
336 break;
337 case 1:
338 LOG_INFO("inner: Strongly-ordered");
339 break;
340 case 3:
341 LOG_INFO("inner: Device");
342 break;
343 case 5:
344 LOG_INFO("inner: Write-Back, Write-Allocate");
345 break;
346 case 6:
347 LOG_INFO("inner: Write-Through");
348 break;
349 case 7:
350 LOG_INFO("inner: Write-Back, no Write-Allocate");
351 break;
352 default:
353 LOG_INFO("inner: %" PRIx32 " ???", INNER);
354 }
355 }
356
357 done:
358 dpm->finish(dpm);
359
360 return retval;
361 }
362
363 static int armv7a_handle_inner_cache_info_command(struct command_context *cmd_ctx,
364 struct armv7a_cache_common *armv7a_cache)
365 {
366 if (armv7a_cache->ctype == -1) {
367 command_print(cmd_ctx, "cache not yet identified");
368 return ERROR_OK;
369 }
370
371 command_print(cmd_ctx,
372 "D-Cache: linelen %" PRIi32 ", associativity %" PRIi32 ", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
373 armv7a_cache->d_u_size.linelen,
374 armv7a_cache->d_u_size.associativity,
375 armv7a_cache->d_u_size.nsets,
376 armv7a_cache->d_u_size.cachesize);
377
378 command_print(cmd_ctx,
379 "I-Cache: linelen %" PRIi32 ", associativity %" PRIi32 ", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
380 armv7a_cache->i_size.linelen,
381 armv7a_cache->i_size.associativity,
382 armv7a_cache->i_size.nsets,
383 armv7a_cache->i_size.cachesize);
384
385 return ERROR_OK;
386 }
387
388 static int _armv7a_flush_all_data(struct target *target)
389 {
390 struct armv7a_common *armv7a = target_to_armv7a(target);
391 struct arm_dpm *dpm = armv7a->arm.dpm;
392 struct armv7a_cachesize *d_u_size =
393 &(armv7a->armv7a_mmu.armv7a_cache.d_u_size);
394 int32_t c_way, c_index = d_u_size->index;
395 int retval;
396 /* check that the data cache was enabled when the target halted */
397 if (!armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
398 LOG_INFO("flush not performed: data cache not enabled at target halt");
399 return ERROR_OK;
400 }
401 retval = dpm->prepare(dpm);
402 if (retval != ERROR_OK)
403 goto done;
404 do {
405 c_way = d_u_size->way;
406 do {
407 uint32_t value = (c_index << d_u_size->index_shift)
408 | (c_way << d_u_size->way_shift);
409 /* DCCISW */
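	/* DCCISW (clean and invalidate data cache line by set/way) expects
	 * the way index in the top bits and the set index shifted left by
	 * log2(line size in bytes). Illustrative example for a 4-way cache
	 * with 32-byte lines: way_shift = 30 and index_shift = 5, so way 3 /
	 * set 10 encodes as (3 << 30) | (10 << 5). */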
410 /* LOG_INFO ("%d %d %x",c_way,c_index,value); */
411 retval = dpm->instr_write_data_r0(dpm,
412 ARMV4_5_MCR(15, 0, 0, 7, 14, 2),
413 value);
414 if (retval != ERROR_OK)
415 goto done;
416 c_way -= 1;
417 } while (c_way >= 0);
418 c_index -= 1;
419 } while (c_index >= 0);
420 done:
421 if (retval != ERROR_OK)
422 LOG_ERROR("cache flush failed");
423 dpm->finish(dpm);
424 return retval;
425 }
426
427 static int armv7a_flush_all_data(struct target *target)
428 {
429 int retval = ERROR_FAIL;
430 /* check that the cache has been correctly identified */
431 struct armv7a_common *armv7a = target_to_armv7a(target);
432 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1) {
433 LOG_ERROR("trying to flush un-identified cache");
434 return retval;
435 }
436
437 if (target->smp) {
438 /* check whether all the other targets have been flushed
439 * so that level 2 can be flushed as well */
440 struct target_list *head;
441 struct target *curr;
442 head = target->head;
443 while (head != (struct target_list *)NULL) {
444 curr = head->target;
445 if (curr->state == TARGET_HALTED) {
446 LOG_INFO("Flushing L1 data cache on core %" PRId32, curr->coreid);
447 retval = _armv7a_flush_all_data(curr);
448 }
449 head = head->next;
450 }
451 } else
452 retval = _armv7a_flush_all_data(target);
453 return retval;
454 }
455
456 /* L2 support is not specific to armv7a; it should eventually move to its own file */
457 static int armv7a_l2x_flush_all_data(struct target *target)
458 {
459
460 #define L2X0_CLEAN_INV_WAY 0x7FC
461 int retval = ERROR_FAIL;
462 struct armv7a_common *armv7a = target_to_armv7a(target);
463 struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
464 (armv7a->armv7a_mmu.armv7a_cache.l2_cache);
465 uint32_t base = l2x_cache->base;
466 uint32_t l2_way = l2x_cache->way;
467 uint32_t l2_way_val = (1 << l2_way) - 1;
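	/* The external controller is flushed by writing a bitmask of the ways
	 * to the Clean & Invalidate by Way register at offset 0x7FC; e.g. an
	 * 8-way L2 would use the mask 0xff (assuming a PL310-compatible
	 * register layout). */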
468 retval = armv7a_flush_all_data(target);
469 if (retval != ERROR_OK)
470 return retval;
471 retval = target->type->write_phys_memory(target,
472 (uint32_t)(base+(uint32_t)L2X0_CLEAN_INV_WAY),
473 (uint32_t)4,
474 (uint32_t)1,
475 (uint8_t *)&l2_way_val);
476 return retval;
477 }
478
479 static int armv7a_handle_l2x_cache_info_command(struct command_context *cmd_ctx,
480 struct armv7a_cache_common *armv7a_cache)
481 {
482
483 struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
484 (armv7a_cache->l2_cache);
485
486 if (armv7a_cache->ctype == -1) {
487 command_print(cmd_ctx, "cache not yet identified");
488 return ERROR_OK;
489 }
490
491 command_print(cmd_ctx,
492 "L1 D-Cache: linelen %" PRIi32 ", associativity %" PRIi32 ", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
493 armv7a_cache->d_u_size.linelen,
494 armv7a_cache->d_u_size.associativity,
495 armv7a_cache->d_u_size.nsets,
496 armv7a_cache->d_u_size.cachesize);
497
498 command_print(cmd_ctx,
499 "L1 I-Cache: linelen %" PRIi32 ", associativity %" PRIi32 ", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
500 armv7a_cache->i_size.linelen,
501 armv7a_cache->i_size.associativity,
502 armv7a_cache->i_size.nsets,
503 armv7a_cache->i_size.cachesize);
504 command_print(cmd_ctx, "L2 unified cache Base Address 0x%" PRIx32 ", %" PRId32 " ways",
505 l2x_cache->base, l2x_cache->way);
506
507
508 return ERROR_OK;
509 }
510
511
512 static int armv7a_l2x_cache_init(struct target *target, uint32_t base, uint32_t way)
513 {
514 struct armv7a_l2x_cache *l2x_cache;
515 struct target_list *head = target->head;
516 struct target *curr;
517
518 struct armv7a_common *armv7a = target_to_armv7a(target);
519 l2x_cache = calloc(1, sizeof(struct armv7a_l2x_cache));
520 l2x_cache->base = base;
521 l2x_cache->way = way;
522 /*LOG_INFO("cache l2 initialized base %x way %d",
523 l2x_cache->base,l2x_cache->way);*/
524 if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
525 LOG_INFO("cache l2 already initialized");
526 armv7a->armv7a_mmu.armv7a_cache.l2_cache = l2x_cache;
527 /* initialize l1 / l2x cache function */
528 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache
529 = armv7a_l2x_flush_all_data;
530 armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
531 armv7a_handle_l2x_cache_info_command;
532 /* initialize all target in this cluster (smp target)
533 * l2 cache must be configured after smp declaration */
534 while (head != (struct target_list *)NULL) {
535 curr = head->target;
536 if (curr != target) {
537 armv7a = target_to_armv7a(curr);
538 if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
539 LOG_ERROR("smp target: cache l2 already initialized");
540 armv7a->armv7a_mmu.armv7a_cache.l2_cache = l2x_cache;
541 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
542 armv7a_l2x_flush_all_data;
543 armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
544 armv7a_handle_l2x_cache_info_command;
545 }
546 head = head->next;
547 }
548 return JIM_OK;
549 }
550
551 COMMAND_HANDLER(handle_cache_l2x)
552 {
553 struct target *target = get_current_target(CMD_CTX);
554 uint32_t base, way;
555
556 if (CMD_ARGC != 2)
557 return ERROR_COMMAND_SYNTAX_ERROR;
558
559 /* command_print(CMD_CTX, "%s %s", CMD_ARGV[0], CMD_ARGV[1]); */
560 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], base);
561 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], way);
562
563 /* register the external L2 cache controller base address and number of ways */
564 armv7a_l2x_cache_init(target, base, way);
565
566 return ERROR_OK;
567 }
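/* Typical usage from a target configuration script (assuming the handlers
 * are chained under the target's command group), for an 8-way controller
 * at a hypothetical base address:
 *
 *   <target_command> cache_config l2x 0x48242000 8
 */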
568
569 int armv7a_handle_cache_info_command(struct command_context *cmd_ctx,
570 struct armv7a_cache_common *armv7a_cache)
571 {
572 if (armv7a_cache->ctype == -1) {
573 command_print(cmd_ctx, "cache not yet identified");
574 return ERROR_OK;
575 }
576
577 if (armv7a_cache->display_cache_info)
578 armv7a_cache->display_cache_info(cmd_ctx, armv7a_cache);
579 return ERROR_OK;
580 }
581
582 /* retrieve the core ID and cluster ID */
583 static int armv7a_read_mpidr(struct target *target)
584 {
585 int retval = ERROR_FAIL;
586 struct armv7a_common *armv7a = target_to_armv7a(target);
587 struct arm_dpm *dpm = armv7a->arm.dpm;
588 uint32_t mpidr;
589 retval = dpm->prepare(dpm);
590 if (retval != ERROR_OK)
591 goto done;
592 /* MRC p15,0,<Rd>,c0,c0,5; read Multiprocessor ID register*/
593
594 retval = dpm->instr_read_data_r0(dpm,
595 ARMV4_5_MRC(15, 0, 0, 0, 0, 5),
596 &mpidr);
597 if (retval != ERROR_OK)
598 goto done;
599
600 /* ARMv7R uses a different format for MPIDR.
601 * When configured uniprocessor (most R cores) it reads as 0.
602 * This will need to be implemented for multiprocessor ARMv7R cores. */
603 if (armv7a->is_armv7r) {
604 if (mpidr)
605 LOG_ERROR("MPIDR nonzero in ARMv7-R target");
606 goto done;
607 }
608
609 if (mpidr & 1<<31) {
610 armv7a->multi_processor_system = (mpidr >> 30) & 1;
611 armv7a->cluster_id = (mpidr >> 8) & 0xf;
612 armv7a->cpu_id = mpidr & 0x3;
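	/* MPIDR (multiprocessor format, bit 31 set): bit 30 is the U bit
	 * (1 = uniprocessor), Aff1 [15:8] is used here as the cluster ID and
	 * Aff0 [7:0] as the CPU ID. E.g. core 1 of cluster 0 on a dual-core
	 * Cortex-A9 typically reads as 0x80000001 (illustrative value). */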
613 LOG_INFO("%s cluster %x core %x %s", target_name(target),
614 armv7a->cluster_id,
615 armv7a->cpu_id,
616 armv7a->multi_processor_system == 0 ? "multi core" : "mono core");
617
618 } else
619 LOG_ERROR("MPIDR not in multiprocessor format");
620
621 done:
622 dpm->finish(dpm);
623 return retval;
624
625
626 }
627
628 int armv7a_identify_cache(struct target *target)
629 {
630 /* read cache descriptor */
631 int retval = ERROR_FAIL;
632 struct armv7a_common *armv7a = target_to_armv7a(target);
633 struct arm_dpm *dpm = armv7a->arm.dpm;
634 uint32_t cache_selected, clidr;
635 uint32_t cache_i_reg, cache_d_reg;
636 struct armv7a_cache_common *cache = &(armv7a->armv7a_mmu.armv7a_cache);
637 if (!armv7a->is_armv7r)
638 armv7a_read_ttbcr(target);
639 retval = dpm->prepare(dpm);
640
641 if (retval != ERROR_OK)
642 goto done;
643 /* retrieve CLIDR
644 * mrc p15, 1, r0, c0, c0, 1 @ read clidr */
645 retval = dpm->instr_read_data_r0(dpm,
646 ARMV4_5_MRC(15, 1, 0, 0, 0, 1),
647 &clidr);
648 if (retval != ERROR_OK)
649 goto done;
650 clidr = (clidr & 0x7000000) >> 23;
651 LOG_INFO("number of cache levels %" PRIx32, (uint32_t)(clidr / 2));
652 if ((clidr / 2) > 1) {
653 /* FIXME: an L2 cache is present on Cortex-A8 and later parts
654 * (e.g. Cortex-A7, A15) but is not yet supported here */
655 LOG_ERROR("L2 cache present: not supported");
656 }
657 /* retrieve selected cache
658 * MRC p15, 2,<Rd>, c0, c0, 0; Read CSSELR */
659 retval = dpm->instr_read_data_r0(dpm,
660 ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
661 &cache_selected);
662 if (retval != ERROR_OK)
663 goto done;
664
665 retval = armv7a->arm.mrc(target, 15,
666 2, 0, /* op1, op2 */
667 0, 0, /* CRn, CRm */
668 &cache_selected);
669 if (retval != ERROR_OK)
670 goto done;
671 /* select instruction cache
672 * MCR p15, 2,<Rd>, c0, c0, 0; Write CSSELR
673 * [0]: 1 = instruction cache selection, 0 = data cache selection */
674 retval = dpm->instr_write_data_r0(dpm,
675 ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
676 1);
677 if (retval != ERROR_OK)
678 goto done;
679
680 /* read CCSIDR
681 * MRC P15,1,<RT>,C0, C0,0 ;on cortex A9 read CCSIDR
682 * [2:0] line size 001 eight word per line
683 * [27:13] NumSet 0x7f 16KB, 0xff 32Kbytes, 0x1ff 64Kbytes */
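	/* Illustrative example: a 32 KB, 4-way cache with 32-byte lines reads
	 * back as LineSize = 1 (8 words per line), Associativity = 3 (4 ways)
	 * and NumSets = 255 (256 sets): 256 * 4 * 32 = 32 KB. */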
684 retval = dpm->instr_read_data_r0(dpm,
685 ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
686 &cache_i_reg);
687 if (retval != ERROR_OK)
688 goto done;
689
690 /* select data cache*/
691 retval = dpm->instr_write_data_r0(dpm,
692 ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
693 0);
694 if (retval != ERROR_OK)
695 goto done;
696
697 retval = dpm->instr_read_data_r0(dpm,
698 ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
699 &cache_d_reg);
700 if (retval != ERROR_OK)
701 goto done;
702
703 /* restore the originally selected cache */
704 retval = dpm->instr_write_data_r0(dpm,
705 ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
706 cache_selected);
707 
708 if (retval != ERROR_OK)
709 goto done;
710 /* dpm->finish() is called once, at the 'done' label below */
711
712 /* decode the cache geometry from CCSIDR */
713 cache->d_u_size.linelen = 16 << (cache_d_reg & 0x7);
714 cache->d_u_size.cachesize = (((cache_d_reg >> 13) & 0x7fff)+1)/8;
715 cache->d_u_size.nsets = (cache_d_reg >> 13) & 0x7fff;
716 cache->d_u_size.associativity = ((cache_d_reg >> 3) & 0x3ff) + 1;
717 /* compute info for set way operation on cache */
718 cache->d_u_size.index_shift = (cache_d_reg & 0x7) + 4;
719 cache->d_u_size.index = (cache_d_reg >> 13) & 0x7fff;
720 cache->d_u_size.way = ((cache_d_reg >> 3) & 0x3ff);
721 cache->d_u_size.way_shift = cache->d_u_size.way + 1;
722 {
723 int i = 0;
724 while (((cache->d_u_size.way_shift >> i) & 1) != 1)
725 i++;
726 cache->d_u_size.way_shift = 32-i;
727 }
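	/* way_shift ends up as 32 - log2(associativity), i.e. the way index
	 * is packed into the top bits of the set/way operand. For a 4-way
	 * cache this gives way_shift = 30 (assuming a power-of-two
	 * associativity, which the loop above relies on). */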
728 #if 0
729 LOG_INFO("data cache index %d << %d, way %d << %d",
730 cache->d_u_size.index, cache->d_u_size.index_shift,
731 cache->d_u_size.way,
732 cache->d_u_size.way_shift);
733
734 LOG_INFO("data cache %d bytes %d KBytes asso %d ways",
735 cache->d_u_size.linelen,
736 cache->d_u_size.cachesize,
737 cache->d_u_size.associativity);
738 #endif
739 cache->i_size.linelen = 16 << (cache_i_reg & 0x7);
740 cache->i_size.associativity = ((cache_i_reg >> 3) & 0x3ff) + 1;
741 cache->i_size.nsets = (cache_i_reg >> 13) & 0x7fff;
742 cache->i_size.cachesize = (((cache_i_reg >> 13) & 0x7fff)+1)/8;
743 /* compute info for set way operation on cache */
744 cache->i_size.index_shift = (cache_i_reg & 0x7) + 4;
745 cache->i_size.index = (cache_i_reg >> 13) & 0x7fff;
746 cache->i_size.way = ((cache_i_reg >> 3) & 0x3ff);
747 cache->i_size.way_shift = cache->i_size.way + 1;
748 {
749 int i = 0;
750 while (((cache->i_size.way_shift >> i) & 1) != 1)
751 i++;
752 cache->i_size.way_shift = 32-i;
753 }
754 #if 0
755 LOG_INFO("instruction cache index %d << %d, way %d << %d",
756 cache->i_size.index, cache->i_size.index_shift,
757 cache->i_size.way, cache->i_size.way_shift);
758
759 LOG_INFO("instruction cache %d bytes %d KBytes asso %d ways",
760 cache->i_size.linelen,
761 cache->i_size.cachesize,
762 cache->i_size.associativity);
763 #endif
764 /* if there is no L2 cache, initialize the L1 data cache flush function */
765 if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache == NULL) {
766 armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
767 armv7a_handle_inner_cache_info_command;
768 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
769 armv7a_flush_all_data;
770 }
771 armv7a->armv7a_mmu.armv7a_cache.ctype = 0;
772
773 done:
774 dpm->finish(dpm);
775 armv7a_read_mpidr(target);
776 return retval;
777
778 }
779
780 int armv7a_init_arch_info(struct target *target, struct armv7a_common *armv7a)
781 {
782 struct arm *arm = &armv7a->arm;
783 arm->arch_info = armv7a;
784 target->arch_info = &armv7a->arm;
785 /* the target pointer is used by all ARMv4/v5-compatible functions */
786 armv7a->arm.target = target;
787 armv7a->arm.common_magic = ARM_COMMON_MAGIC;
788 armv7a->common_magic = ARMV7_COMMON_MAGIC;
789 armv7a->armv7a_mmu.armv7a_cache.l2_cache = NULL;
790 armv7a->armv7a_mmu.armv7a_cache.ctype = -1;
791 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache = NULL;
792 armv7a->armv7a_mmu.armv7a_cache.display_cache_info = NULL;
793 return ERROR_OK;
794 }
795
796 int armv7a_arch_state(struct target *target)
797 {
798 static const char *state[] = {
799 "disabled", "enabled"
800 };
801
802 struct armv7a_common *armv7a = target_to_armv7a(target);
803 struct arm *arm = &armv7a->arm;
804
805 if (armv7a->common_magic != ARMV7_COMMON_MAGIC) {
806 LOG_ERROR("BUG: called for a non-ARMv7A target");
807 return ERROR_COMMAND_SYNTAX_ERROR;
808 }
809
810 arm_arch_state(target);
811
812 if (armv7a->is_armv7r) {
813 LOG_USER("D-Cache: %s, I-Cache: %s",
814 state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
815 state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);
816 } else {
817 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s",
818 state[armv7a->armv7a_mmu.mmu_enabled],
819 state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
820 state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);
821 }
822
823 if (arm->core_mode == ARM_MODE_ABT)
824 armv7a_show_fault_registers(target);
825 if (target->debug_reason == DBG_REASON_WATCHPOINT)
826 LOG_USER("Watchpoint triggered at PC %#08x",
827 (unsigned) armv7a->dpm.wp_pc);
828
829 return ERROR_OK;
830 }
831
832 static const struct command_registration l2_cache_commands[] = {
833 {
834 .name = "l2x",
835 .handler = handle_cache_l2x,
836 .mode = COMMAND_EXEC,
837 .help = "configure l2x cache",
838 
839 .usage = "[base_addr] [number_of_way]",
840 },
841 COMMAND_REGISTRATION_DONE
842
843 };
844
845 const struct command_registration l2x_cache_command_handlers[] = {
846 {
847 .name = "cache_config",
848 .mode = COMMAND_EXEC,
849 .help = "cache configuration for a target",
850 .usage = "",
851 .chain = l2_cache_commands,
852 },
853 COMMAND_REGISTRATION_DONE
854 };
855
856
857 const struct command_registration armv7a_command_handlers[] = {
858 {
859 .chain = dap_command_handlers,
860 },
861 {
862 .chain = l2x_cache_command_handlers,
863 },
864 COMMAND_REGISTRATION_DONE
865 };
