|
5 | 5 | #include "../../../include/linux/filter.h" |
6 | 6 | #include "bpf_misc.h" |
7 | 7 |
|
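|  | +/* Single-entry array map; the map-value tests below perform their |
|  | + * atomic operations on its value instead of the stack. |
|  | + */ |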
| 8 | +struct { |
| 9 | + __uint(type, BPF_MAP_TYPE_ARRAY); |
| 10 | + __uint(max_entries, 1); |
| 11 | + __type(key, __u32); |
| 12 | + __type(value, __u64); |
| 13 | +} precision_map SEC(".maps"); |
| 14 | + |
8 | 15 | SEC("?raw_tp") |
9 | 16 | __success __log_level(2) |
10 | 17 | __msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10") |
@@ -301,4 +308,338 @@ __naked int bpf_neg_5(void) |
301 | 308 | ::: __clobber_all); |
302 | 309 | } |
303 | 310 |
|
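|  | +/* Backtracking the fetched value (r2) through a stack-based |
|  | + * atomic64_fetch_add must mark the source stack slot (fp-8) precise |
|  | + * and continue back to the store that initialized it. |
|  | + */ |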
| 311 | +SEC("?raw_tp") |
| 312 | +__success __log_level(2) |
| 313 | +__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10") |
| 314 | +__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_add((u64 *)(r10 -8), r2)") |
| 315 | +__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0") |
| 316 | +__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1") |
| 317 | +__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8") |
| 318 | +__naked int bpf_atomic_fetch_add_precision(void) |
| 319 | +{ |
| 320 | + asm volatile ( |
| 321 | + "r1 = 8;" |
| 322 | + "*(u64 *)(r10 - 8) = r1;" |
| 323 | + "r2 = 0;" |
| 324 | + ".8byte %[fetch_add_insn];" /* r2 = atomic_fetch_add(*(u64 *)(r10 - 8), r2) */ |
| 325 | + "r3 = r10;" |
| 326 | + "r3 += r2;" /* mark_precise */ |
| 327 | + "r0 = 0;" |
| 328 | + "exit;" |
| 329 | + : |
| 330 | + : __imm_insn(fetch_add_insn, |
| 331 | + BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8)) |
| 332 | + : __clobber_all); |
| 333 | +} |
| 334 | + |
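|  | +/* Same backtracking chain as above, for atomic64_xchg. */ |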
| 335 | +SEC("?raw_tp") |
| 336 | +__success __log_level(2) |
| 337 | +__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10") |
| 338 | +__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_xchg((u64 *)(r10 -8), r2)") |
| 339 | +__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0") |
| 340 | +__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1") |
| 341 | +__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8") |
| 342 | +__naked int bpf_atomic_xchg_precision(void) |
| 343 | +{ |
| 344 | + asm volatile ( |
| 345 | + "r1 = 8;" |
| 346 | + "*(u64 *)(r10 - 8) = r1;" |
| 347 | + "r2 = 0;" |
| 348 | + ".8byte %[xchg_insn];" /* r2 = atomic_xchg(*(u64 *)(r10 - 8), r2) */ |
| 349 | + "r3 = r10;" |
| 350 | + "r3 += r2;" /* mark_precise */ |
| 351 | + "r0 = 0;" |
| 352 | + "exit;" |
| 353 | + : |
| 354 | + : __imm_insn(xchg_insn, |
| 355 | + BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_10, BPF_REG_2, -8)) |
| 356 | + : __clobber_all); |
| 357 | +} |
| 358 | + |
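|  | +/* Same backtracking chain as above, for atomic64_fetch_or. */ |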
| 359 | +SEC("?raw_tp") |
| 360 | +__success __log_level(2) |
| 361 | +__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10") |
| 362 | +__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_or((u64 *)(r10 -8), r2)") |
| 363 | +__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0") |
| 364 | +__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1") |
| 365 | +__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8") |
| 366 | +__naked int bpf_atomic_fetch_or_precision(void) |
| 367 | +{ |
| 368 | + asm volatile ( |
| 369 | + "r1 = 8;" |
| 370 | + "*(u64 *)(r10 - 8) = r1;" |
| 371 | + "r2 = 0;" |
| 372 | + ".8byte %[fetch_or_insn];" /* r2 = atomic_fetch_or(*(u64 *)(r10 - 8), r2) */ |
| 373 | + "r3 = r10;" |
| 374 | + "r3 += r2;" /* mark_precise */ |
| 375 | + "r0 = 0;" |
| 376 | + "exit;" |
| 377 | + : |
| 378 | + : __imm_insn(fetch_or_insn, |
| 379 | + BPF_ATOMIC_OP(BPF_DW, BPF_OR | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8)) |
| 380 | + : __clobber_all); |
| 381 | +} |
| 382 | + |
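|  | +/* Same backtracking chain as above, for atomic64_fetch_and. */ |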
| 383 | +SEC("?raw_tp") |
| 384 | +__success __log_level(2) |
| 385 | +__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10") |
| 386 | +__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_and((u64 *)(r10 -8), r2)") |
| 387 | +__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0") |
| 388 | +__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1") |
| 389 | +__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8") |
| 390 | +__naked int bpf_atomic_fetch_and_precision(void) |
| 391 | +{ |
| 392 | + asm volatile ( |
| 393 | + "r1 = 8;" |
| 394 | + "*(u64 *)(r10 - 8) = r1;" |
| 395 | + "r2 = 0;" |
| 396 | + ".8byte %[fetch_and_insn];" /* r2 = atomic_fetch_and(*(u64 *)(r10 - 8), r2) */ |
| 397 | + "r3 = r10;" |
| 398 | + "r3 += r2;" /* mark_precise */ |
| 399 | + "r0 = 0;" |
| 400 | + "exit;" |
| 401 | + : |
| 402 | + : __imm_insn(fetch_and_insn, |
| 403 | + BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8)) |
| 404 | + : __clobber_all); |
| 405 | +} |
| 406 | + |
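|  | +/* Same backtracking chain as above, for atomic64_fetch_xor. */ |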
| 407 | +SEC("?raw_tp") |
| 408 | +__success __log_level(2) |
| 409 | +__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10") |
| 410 | +__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_xor((u64 *)(r10 -8), r2)") |
| 411 | +__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0") |
| 412 | +__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1") |
| 413 | +__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8") |
| 414 | +__naked int bpf_atomic_fetch_xor_precision(void) |
| 415 | +{ |
| 416 | + asm volatile ( |
| 417 | + "r1 = 8;" |
| 418 | + "*(u64 *)(r10 - 8) = r1;" |
| 419 | + "r2 = 0;" |
| 420 | + ".8byte %[fetch_xor_insn];" /* r2 = atomic_fetch_xor(*(u64 *)(r10 - 8), r2) */ |
| 421 | + "r3 = r10;" |
| 422 | + "r3 += r2;" /* mark_precise */ |
| 423 | + "r0 = 0;" |
| 424 | + "exit;" |
| 425 | + : |
| 426 | + : __imm_insn(fetch_xor_insn, |
| 427 | + BPF_ATOMIC_OP(BPF_DW, BPF_XOR | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8)) |
| 428 | + : __clobber_all); |
| 429 | +} |
| 430 | + |
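|  | +/* cmpxchg returns the old memory value in r0; backtracking r0 must |
|  | + * shift precision to the stack slot (fp-8) and reach the original |
|  | + * store. |
|  | + */ |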
| 431 | +SEC("?raw_tp") |
| 432 | +__success __log_level(2) |
| 433 | +__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r3 = r10") |
| 434 | +__msg("mark_precise: frame0: regs=r0 stack= before 4: (db) r0 = atomic64_cmpxchg((u64 *)(r10 -8), r0, r2)") |
| 435 | +__msg("mark_precise: frame0: regs= stack=-8 before 3: (b7) r2 = 0") |
| 436 | +__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r0 = 0") |
| 437 | +__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1") |
| 438 | +__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8") |
| 439 | +__naked int bpf_atomic_cmpxchg_precision(void) |
| 440 | +{ |
| 441 | + asm volatile ( |
| 442 | + "r1 = 8;" |
| 443 | + "*(u64 *)(r10 - 8) = r1;" |
| 444 | + "r0 = 0;" |
| 445 | + "r2 = 0;" |
| 446 | + ".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r10 - 8), r0, r2) */ |
| 447 | + "r3 = r10;" |
| 448 | + "r3 += r0;" /* mark_precise */ |
| 449 | + "r0 = 0;" |
| 450 | + "exit;" |
| 451 | + : |
| 452 | + : __imm_insn(cmpxchg_insn, |
| 453 | + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8)) |
| 454 | + : __clobber_all); |
| 455 | +} |
| 456 | + |
| 457 | +/* Regression test for dual precision tracking: both the fetched value (r2) |
| 458 | + * and a re-read of the same stack slot (r3) must be marked precise. After |
| 459 | + * the atomic operation the stack slot is STACK_MISC, so the ldx at |
| 460 | + * insn 4 does NOT set INSN_F_STACK_ACCESS; precision for the stack slot |
| 461 | + * propagates solely through the load side of the atomic fetch (insn 3). |
| 462 | + */ |
| 463 | +SEC("?raw_tp") |
| 464 | +__success __log_level(2) |
| 465 | +__msg("mark_precise: frame0: regs=r2,r3 stack= before 4: (79) r3 = *(u64 *)(r10 -8)") |
| 466 | +__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_add((u64 *)(r10 -8), r2)") |
| 467 | +__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0") |
| 468 | +__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1") |
| 469 | +__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8") |
| 470 | +__naked int bpf_atomic_fetch_add_dual_precision(void) |
| 471 | +{ |
| 472 | + asm volatile ( |
| 473 | + "r1 = 8;" |
| 474 | + "*(u64 *)(r10 - 8) = r1;" |
| 475 | + "r2 = 0;" |
| 476 | + ".8byte %[fetch_add_insn];" /* r2 = atomic_fetch_add(*(u64 *)(r10 - 8), r2) */ |
| 477 | + "r3 = *(u64 *)(r10 - 8);" |
| 478 | + "r4 = r2;" |
| 479 | + "r4 += r3;" |
| 480 | + "r4 &= 7;" |
| 481 | + "r5 = r10;" |
| 482 | + "r5 += r4;" /* mark_precise */ |
| 483 | + "r0 = 0;" |
| 484 | + "exit;" |
| 485 | + : |
| 486 | + : __imm_insn(fetch_add_insn, |
| 487 | + BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8)) |
| 488 | + : __clobber_all); |
| 489 | +} |
| 490 | + |
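|  | +/* Same dual-precision scenario as above, but for atomic64_cmpxchg: |
|  | + * both r0 (the old value) and the re-read r3 need precision. |
|  | + */ |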
| 491 | +SEC("?raw_tp") |
| 492 | +__success __log_level(2) |
| 493 | +__msg("mark_precise: frame0: regs=r0,r3 stack= before 5: (79) r3 = *(u64 *)(r10 -8)") |
| 494 | +__msg("mark_precise: frame0: regs=r0 stack= before 4: (db) r0 = atomic64_cmpxchg((u64 *)(r10 -8), r0, r2)") |
| 495 | +__msg("mark_precise: frame0: regs= stack=-8 before 3: (b7) r2 = 0") |
| 496 | +__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r0 = 8") |
| 497 | +__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1") |
| 498 | +__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8") |
| 499 | +__naked int bpf_atomic_cmpxchg_dual_precision(void) |
| 500 | +{ |
| 501 | + asm volatile ( |
| 502 | + "r1 = 8;" |
| 503 | + "*(u64 *)(r10 - 8) = r1;" |
| 504 | + "r0 = 8;" |
| 505 | + "r2 = 0;" |
| 506 | + ".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r10 - 8), r0, r2) */ |
| 507 | + "r3 = *(u64 *)(r10 - 8);" |
| 508 | + "r4 = r0;" |
| 509 | + "r4 += r3;" |
| 510 | + "r4 &= 7;" |
| 511 | + "r5 = r10;" |
| 512 | + "r5 += r4;" /* mark_precise */ |
| 513 | + "r0 = 0;" |
| 514 | + "exit;" |
| 515 | + : |
| 516 | + : __imm_insn(cmpxchg_insn, |
| 517 | + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8)) |
| 518 | + : __clobber_all); |
| 519 | +} |
| 520 | + |
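|  | +/* atomic64_fetch_add on a map value: backtracking r1 must succeed |
|  | + * through the atomic insn without the verifier falling back to |
|  | + * forcing all scalars precise. |
|  | + */ |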
| 521 | +SEC("?raw_tp") |
| 522 | +__success __log_level(2) |
| 523 | +__msg("mark_precise: frame0: regs=r1 stack= before 10: (57) r1 &= 7") |
| 524 | +__msg("mark_precise: frame0: regs=r1 stack= before 9: (db) r1 = atomic64_fetch_add((u64 *)(r0 +0), r1)") |
| 525 | +__not_msg("falling back to forcing all scalars precise") |
| 526 | +__naked int bpf_atomic_fetch_add_map_precision(void) |
| 527 | +{ |
| 528 | + asm volatile ( |
| 529 | + "r1 = 0;" |
| 530 | + "*(u64 *)(r10 - 8) = r1;" |
| 531 | + "r2 = r10;" |
| 532 | + "r2 += -8;" |
| 533 | + "r1 = %[precision_map] ll;" |
| 534 | + "call %[bpf_map_lookup_elem];" |
| 535 | + "if r0 == 0 goto 1f;" |
| 536 | + "r1 = 0;" |
| 537 | + ".8byte %[fetch_add_insn];" /* r1 = atomic_fetch_add(*(u64 *)(r0 + 0), r1) */ |
| 538 | + "r1 &= 7;" |
| 539 | + "r2 = r10;" |
| 540 | + "r2 += r1;" /* mark_precise */ |
| 541 | + "1: r0 = 0;" |
| 542 | + "exit;" |
| 543 | + : |
| 544 | + : __imm_addr(precision_map), |
| 545 | + __imm(bpf_map_lookup_elem), |
| 546 | + __imm_insn(fetch_add_insn, |
| 547 | + BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_0, BPF_REG_1, 0)) |
| 548 | + : __clobber_all); |
| 549 | +} |
| 550 | + |
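|  | +/* As above, for atomic64_cmpxchg on a map value. */ |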
| 551 | +SEC("?raw_tp") |
| 552 | +__success __log_level(2) |
| 553 | +__msg("mark_precise: frame0: regs=r0 stack= before 12: (57) r0 &= 7") |
| 554 | +__msg("mark_precise: frame0: regs=r0 stack= before 11: (db) r0 = atomic64_cmpxchg((u64 *)(r6 +0), r0, r1)") |
| 555 | +__not_msg("falling back to forcing all scalars precise") |
| 556 | +__naked int bpf_atomic_cmpxchg_map_precision(void) |
| 557 | +{ |
| 558 | + asm volatile ( |
| 559 | + "r1 = 0;" |
| 560 | + "*(u64 *)(r10 - 8) = r1;" |
| 561 | + "r2 = r10;" |
| 562 | + "r2 += -8;" |
| 563 | + "r1 = %[precision_map] ll;" |
| 564 | + "call %[bpf_map_lookup_elem];" |
| 565 | + "if r0 == 0 goto 1f;" |
| 566 | + "r6 = r0;" |
| 567 | + "r0 = 0;" |
| 568 | + "r1 = 0;" |
| 569 | + ".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r6 + 0), r0, r1) */ |
| 570 | + "r0 &= 7;" |
| 571 | + "r2 = r10;" |
| 572 | + "r2 += r0;" /* mark_precise */ |
| 573 | + "1: r0 = 0;" |
| 574 | + "exit;" |
| 575 | + : |
| 576 | + : __imm_addr(precision_map), |
| 577 | + __imm(bpf_map_lookup_elem), |
| 578 | + __imm_insn(cmpxchg_insn, |
| 579 | + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_6, BPF_REG_1, 0)) |
| 580 | + : __clobber_all); |
| 581 | +} |
| 582 | + |
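|  | +/* 32-bit (BPF_W) variant of the map-value fetch_add test. */ |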
| 583 | +SEC("?raw_tp") |
| 584 | +__success __log_level(2) |
| 585 | +__msg("mark_precise: frame0: regs=r1 stack= before 10: (57) r1 &= 7") |
| 586 | +__msg("mark_precise: frame0: regs=r1 stack= before 9: (c3) r1 = atomic_fetch_add((u32 *)(r0 +0), r1)") |
| 587 | +__not_msg("falling back to forcing all scalars precise") |
| 588 | +__naked int bpf_atomic_fetch_add_32bit_precision(void) |
| 589 | +{ |
| 590 | + asm volatile ( |
| 591 | + "r1 = 0;" |
| 592 | + "*(u64 *)(r10 - 8) = r1;" |
| 593 | + "r2 = r10;" |
| 594 | + "r2 += -8;" |
| 595 | + "r1 = %[precision_map] ll;" |
| 596 | + "call %[bpf_map_lookup_elem];" |
| 597 | + "if r0 == 0 goto 1f;" |
| 598 | + "r1 = 0;" |
| 599 | + ".8byte %[fetch_add_insn];" /* r1 = atomic_fetch_add(*(u32 *)(r0 + 0), r1) */ |
| 600 | + "r1 &= 7;" |
| 601 | + "r2 = r10;" |
| 602 | + "r2 += r1;" /* mark_precise */ |
| 603 | + "1: r0 = 0;" |
| 604 | + "exit;" |
| 605 | + : |
| 606 | + : __imm_addr(precision_map), |
| 607 | + __imm(bpf_map_lookup_elem), |
| 608 | + __imm_insn(fetch_add_insn, |
| 609 | + BPF_ATOMIC_OP(BPF_W, BPF_ADD | BPF_FETCH, BPF_REG_0, BPF_REG_1, 0)) |
| 610 | + : __clobber_all); |
| 611 | +} |
| 612 | + |
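|  | +/* 32-bit (BPF_W) variant of the map-value cmpxchg test. */ |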
| 613 | +SEC("?raw_tp") |
| 614 | +__success __log_level(2) |
| 615 | +__msg("mark_precise: frame0: regs=r0 stack= before 12: (57) r0 &= 7") |
| 616 | +__msg("mark_precise: frame0: regs=r0 stack= before 11: (c3) r0 = atomic_cmpxchg((u32 *)(r6 +0), r0, r1)") |
| 617 | +__not_msg("falling back to forcing all scalars precise") |
| 618 | +__naked int bpf_atomic_cmpxchg_32bit_precision(void) |
| 619 | +{ |
| 620 | + asm volatile ( |
| 621 | + "r1 = 0;" |
| 622 | + "*(u64 *)(r10 - 8) = r1;" |
| 623 | + "r2 = r10;" |
| 624 | + "r2 += -8;" |
| 625 | + "r1 = %[precision_map] ll;" |
| 626 | + "call %[bpf_map_lookup_elem];" |
| 627 | + "if r0 == 0 goto 1f;" |
| 628 | + "r6 = r0;" |
| 629 | + "r0 = 0;" |
| 630 | + "r1 = 0;" |
| 631 | + ".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u32 *)(r6 + 0), r0, r1) */ |
| 632 | + "r0 &= 7;" |
| 633 | + "r2 = r10;" |
| 634 | + "r2 += r0;" /* mark_precise */ |
| 635 | + "1: r0 = 0;" |
| 636 | + "exit;" |
| 637 | + : |
| 638 | + : __imm_addr(precision_map), |
| 639 | + __imm(bpf_map_lookup_elem), |
| 640 | + __imm_insn(cmpxchg_insn, |
| 641 | + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_6, BPF_REG_1, 0)) |
| 642 | + : __clobber_all); |
| 643 | +} |
| 644 | + |
304 | 645 | char _license[] SEC("license") = "GPL"; |