@@ -24,6 +24,9 @@
 #include <linux/prctl.h>
 #include <linux/sched/task_stack.h>
 
+#include <asm/insn.h>
 #include <asm/spectre.h>
 #include <asm/traps.h>
+#include <asm/vectors.h>
+#include <asm/virt.h>
 
 /*
  * We try to ensure that the mitigation state can never change as the result of
@@ -814,13 +817,168 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 	}
 }
 
+/*
+ * Spectre BHB.
+ *
+ * A CPU is either:
+ * - Mitigated by a branchy loop a CPU specific number of times, and listed
+ *   in our "loop mitigated list".
+ * - Mitigated in software by the firmware Spectre v2 call.
+ * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
+ *   software mitigation in the vectors is needed.
+ * - Has CSV2.3, so is unaffected.
+ */
 static enum mitigation_state spectre_bhb_state;
 
 enum mitigation_state arm64_get_spectre_bhb_state(void)
 {
 	return spectre_bhb_state;
 }
 
+/*
+ * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
+ * SCOPE_SYSTEM call will give the right answer. Returns 'k', the number of
+ * branchy-loop iterations this CPU needs, or 0 if the CPU is not on the
+ * loop mitigated list.
+ */
+u8 spectre_bhb_loop_affected(int scope)
+{
+	u8 k = 0;
+	static u8 max_bhb_k;
+
+	if (scope == SCOPE_LOCAL_CPU) {
+		static const struct midr_range spectre_bhb_k32_list[] = {
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+			{},
+		};
+		static const struct midr_range spectre_bhb_k24_list[] = {
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+			{},
+		};
+		static const struct midr_range spectre_bhb_k8_list[] = {
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+			{},
+		};
+
+		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
+			k = 32;
+		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
+			k = 24;
+		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
+			k = 8;
+
+		max_bhb_k = max(max_bhb_k, k);
+	} else {
+		k = max_bhb_k;
+	}
+
+	return k;
+}
+
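+/*
+ * Ask firmware, via the SMCCC ARCH_FEATURES call, whether the
+ * ARCH_WORKAROUND_3 mitigation is implemented for this CPU.
+ */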
+static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
+{
+	int ret;
+	struct arm_smccc_res res;
+
+	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);
+
+	ret = res.a0;
+	switch (ret) {
+	case SMCCC_RET_SUCCESS:
+		return SPECTRE_MITIGATED;
+	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+		return SPECTRE_UNAFFECTED;
+	default:
+		fallthrough;
+	case SMCCC_RET_NOT_SUPPORTED:
+		return SPECTRE_VULNERABLE;
+	}
+}
+
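+/*
+ * A CPU is affected via the firmware path if it is on the known MIDR list,
+ * or if firmware reports the workaround as implemented. The result is
+ * sticky: once any CPU is found affected, the SCOPE_SYSTEM answer stays true.
+ */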
+static bool is_spectre_bhb_fw_affected(int scope)
+{
+	static bool system_affected;
+	enum mitigation_state fw_state;
+	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
+	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+		{},
+	};
+	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
+					spectre_bhb_firmware_mitigated_list);
+
+	if (scope != SCOPE_LOCAL_CPU)
+		return system_affected;
+
+	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
+	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
+		system_affected = true;
+		return true;
+	}
+
+	return false;
+}
+
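+/*
+ * With ECBHB, taking an exception clears the branch history, so no software
+ * mitigation in the vectors is needed.
+ */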
+static bool supports_ecbhb(int scope)
+{
+	u64 mmfr1;
+
+	if (scope == SCOPE_LOCAL_CPU)
+		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
+	else
+		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+
+	return cpuid_feature_extract_unsigned_field(mmfr1,
+						    ID_AA64MMFR1_ECBHB_SHIFT);
+}
+
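+/* CSV2.3 means unaffected; otherwise check the loop list and the fw path. */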
+bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
+			     int scope)
+{
+	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+	if (supports_csv2p3(scope))
+		return false;
+
+	if (spectre_bhb_loop_affected(scope))
+		return true;
+
+	if (is_spectre_bhb_fw_affected(scope))
+		return true;
+
+	return false;
+}
+
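+/*
+ * Point this CPU at the chosen mitigation vectors: both the per-CPU
+ * this_cpu_vector pointer and, unless KPTI will switch it on the exit to
+ * user-space, VBAR_EL1 itself.
+ */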
+static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
+{
+	const char *v = arm64_get_bp_hardening_vector(slot);
+
+	if (slot < 0)
+		return;
+
+	__this_cpu_write(this_cpu_vector, v);
+
+	/*
+	 * When KPTI is in use, the vectors are switched when exiting to
+	 * user-space.
+	 */
+	if (arm64_kernel_unmapped_at_el0())
+		return;
+
+	write_sysreg(v, vbar_el1);
+	isb();
+}
+
+#ifdef CONFIG_KVM
 static int kvm_bhb_get_vecs_size(const char *start)
 {
 	if (start == __smccc_workaround_3_smc)
@@ -833,7 +991,7 @@ static int kvm_bhb_get_vecs_size(const char *start)
 	return 0;
 }
 
-void kvm_setup_bhb_slot(const char *hyp_vecs_start)
+static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
 {
 	int cpu, slot = -1, size;
 	const char *hyp_vecs_end;
@@ -864,3 +1022,77 @@ void kvm_setup_bhb_slot(const char *hyp_vecs_start)
 	__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
 	raw_spin_unlock(&bp_lock);
 }
+#else
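+/* Without KVM there are no hyp vector slots: stub out the KVM pieces. */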
+#define __smccc_workaround_3_smc NULL
+#define __spectre_bhb_loop_k8 NULL
+#define __spectre_bhb_loop_k24 NULL
+#define __spectre_bhb_loop_k32 NULL
+
+static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { }
+#endif /* CONFIG_KVM */
+
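+/* Pick and install the strongest applicable Spectre-BHB mitigation for this CPU. */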
+void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
+{
+	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
+
+	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
+		return;
+
+	if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
+		/* No point mitigating Spectre-BHB alone. */
+	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
+		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
+	} else if (cpu_mitigations_off()) {
+		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
+	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
+		state = SPECTRE_MITIGATED;
+	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
+		switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
+		case 8:
+			kvm_setup_bhb_slot(__spectre_bhb_loop_k8);
+			break;
+		case 24:
+			kvm_setup_bhb_slot(__spectre_bhb_loop_k24);
+			break;
+		case 32:
+			kvm_setup_bhb_slot(__spectre_bhb_loop_k32);
+			break;
+		default:
+			WARN_ON_ONCE(1);
+		}
+		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
+
+		state = SPECTRE_MITIGATED;
+	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
+		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
+		if (fw_state == SPECTRE_MITIGATED) {
+			kvm_setup_bhb_slot(__smccc_workaround_3_smc);
+			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
+
+			state = SPECTRE_MITIGATED;
+		}
+	}
+
+	update_mitigation_state(&spectre_bhb_state, state);
+}
+
+/* Patch the MOV in the mitigation loop so its immediate is the system-wide loop count */
+void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
+					 __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	u8 rd;
+	u32 insn;
+	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
+
+	BUG_ON(nr_inst != 1); /* MOV -> MOV */
+
+	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
+		return;
+
+	insn = le32_to_cpu(*origptr);
+	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
+	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
+					 AARCH64_INSN_VARIANT_64BIT,
+					 AARCH64_INSN_MOVEWIDE_ZERO);
+	*updptr++ = cpu_to_le32(insn);
+}
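For reference, the MOV this callback rewrites sits in the entry-assembly "branchy loop" that scrubs the branch history. A minimal sketch of that patch site, assuming the kernel's alternative_cb assembler macros and an illustrative scratch register (the real macro takes the register as a parameter):

	alternative_cb	spectre_bhb_patch_loop_iter
	mov	x18, #32		// immediate patched to the system-wide 'k'
	alternative_cb_end
1:	b	. + 4			// a taken branch, to displace history entries
	subs	x18, x18, #1
	b.ne	1b			// iterate 'k' times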