Skip to content

Commit 7cc1720

Browse files
Chen Ridong authored and htejun (Tejun Heo) committed
cpuset: remove v1-specific code from generate_sched_domains
Following the introduction of cpuset1_generate_sched_domains() for v1 in the previous patch, v1-specific logic can now be removed from the generic generate_sched_domains(). This patch cleans up the v1-only code and ensures uf_node is only visible when CONFIG_CPUSETS_V1=y. Signed-off-by: Chen Ridong <chenridong@huawei.com> Reviewed-by: Waiman Long <longman@redhat.com> Signed-off-by: Tejun Heo <tj@kernel.org>
1 parent 6e1d31c commit 7cc1720

3 files changed

Lines changed: 28 additions & 129 deletions

File tree

kernel/cgroup/cpuset-internal.h

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -175,14 +175,14 @@ struct cpuset {
175175
/* Handle for cpuset.cpus.partition */
176176
struct cgroup_file partition_file;
177177

178-
/* Used to merge intersecting subsets for generate_sched_domains */
179-
struct uf_node node;
180-
181178
#ifdef CONFIG_CPUSETS_V1
182179
struct fmeter fmeter; /* memory_pressure filter */
183180

184181
/* for custom sched domain */
185182
int relax_domain_level;
183+
184+
/* Used to merge intersecting subsets for generate_sched_domains */
185+
struct uf_node node;
186186
#endif
187187
};
188188

@@ -314,8 +314,6 @@ void cpuset1_hotplug_update_tasks(struct cpuset *cs,
314314
int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial);
315315
void cpuset1_init(struct cpuset *cs);
316316
void cpuset1_online_css(struct cgroup_subsys_state *css);
317-
void update_domain_attr_tree(struct sched_domain_attr *dattr,
318-
struct cpuset *root_cs);
319317
int cpuset1_generate_sched_domains(cpumask_var_t **domains,
320318
struct sched_domain_attr **attributes);
321319

@@ -330,8 +328,6 @@ static inline int cpuset1_validate_change(struct cpuset *cur,
330328
struct cpuset *trial) { return 0; }
331329
static inline void cpuset1_init(struct cpuset *cs) {}
332330
static inline void cpuset1_online_css(struct cgroup_subsys_state *css) {}
333-
static inline void update_domain_attr_tree(struct sched_domain_attr *dattr,
334-
struct cpuset *root_cs) {}
335331
static inline int cpuset1_generate_sched_domains(cpumask_var_t **domains,
336332
struct sched_domain_attr **attributes) { return 0; };
337333

kernel/cgroup/cpuset-v1.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -560,7 +560,7 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
560560
dattr->relax_domain_level = c->relax_domain_level;
561561
}
562562

563-
void update_domain_attr_tree(struct sched_domain_attr *dattr,
563+
static void update_domain_attr_tree(struct sched_domain_attr *dattr,
564564
struct cpuset *root_cs)
565565
{
566566
struct cpuset *cp;

kernel/cgroup/cpuset.c

Lines changed: 24 additions & 121 deletions
Original file line numberDiff line numberDiff line change
@@ -789,89 +789,40 @@ static int generate_sched_domains(cpumask_var_t **domains,
789789
{
790790
struct cpuset *cp; /* top-down scan of cpusets */
791791
struct cpuset **csa; /* array of all cpuset ptrs */
792-
int csn; /* how many cpuset ptrs in csa so far */
793792
int i, j; /* indices for partition finding loops */
794793
cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
795794
struct sched_domain_attr *dattr; /* attributes for custom domains */
796795
int ndoms = 0; /* number of sched domains in result */
797-
int nslot; /* next empty doms[] struct cpumask slot */
798796
struct cgroup_subsys_state *pos_css;
799-
bool root_load_balance = is_sched_load_balance(&top_cpuset);
800-
bool cgrpv2 = cpuset_v2();
801-
int nslot_update;
802797

803-
if (!cgrpv2)
798+
if (!cpuset_v2())
804799
return cpuset1_generate_sched_domains(domains, attributes);
805800

806801
doms = NULL;
807802
dattr = NULL;
808803
csa = NULL;
809804

810805
/* Special case for the 99% of systems with one, full, sched domain */
811-
if (root_load_balance && cpumask_empty(subpartitions_cpus)) {
812-
single_root_domain:
806+
if (cpumask_empty(subpartitions_cpus)) {
813807
ndoms = 1;
814-
doms = alloc_sched_domains(ndoms);
815-
if (!doms)
816-
goto done;
817-
818-
dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
819-
if (dattr) {
820-
*dattr = SD_ATTR_INIT;
821-
update_domain_attr_tree(dattr, &top_cpuset);
822-
}
823-
cpumask_and(doms[0], top_cpuset.effective_cpus,
824-
housekeeping_cpumask(HK_TYPE_DOMAIN));
825-
826-
goto done;
808+
/* !csa will be checked and can be correctly handled */
809+
goto generate_doms;
827810
}
828811

829812
csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
830813
if (!csa)
831814
goto done;
832-
csn = 0;
833815

816+
/* Find how many partitions and cache them to csa[] */
834817
rcu_read_lock();
835-
if (root_load_balance)
836-
csa[csn++] = &top_cpuset;
837818
cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
838-
if (cp == &top_cpuset)
839-
continue;
840-
841-
if (cgrpv2)
842-
goto v2;
843-
844-
/*
845-
* v1:
846-
* Continue traversing beyond @cp iff @cp has some CPUs and
847-
* isn't load balancing. The former is obvious. The
848-
* latter: All child cpusets contain a subset of the
849-
* parent's cpus, so just skip them, and then we call
850-
* update_domain_attr_tree() to calc relax_domain_level of
851-
* the corresponding sched domain.
852-
*/
853-
if (!cpumask_empty(cp->cpus_allowed) &&
854-
!(is_sched_load_balance(cp) &&
855-
cpumask_intersects(cp->cpus_allowed,
856-
housekeeping_cpumask(HK_TYPE_DOMAIN))))
857-
continue;
858-
859-
if (is_sched_load_balance(cp) &&
860-
!cpumask_empty(cp->effective_cpus))
861-
csa[csn++] = cp;
862-
863-
/* skip @cp's subtree */
864-
pos_css = css_rightmost_descendant(pos_css);
865-
continue;
866-
867-
v2:
868819
/*
869820
* Only valid partition roots that are not isolated and with
870-
* non-empty effective_cpus will be saved into csn[].
821+
* non-empty effective_cpus will be saved into csa[].
871822
*/
872823
if ((cp->partition_root_state == PRS_ROOT) &&
873824
!cpumask_empty(cp->effective_cpus))
874-
csa[csn++] = cp;
825+
csa[ndoms++] = cp;
875826

876827
/*
877828
* Skip @cp's subtree if not a partition root and has no
@@ -882,40 +833,18 @@ static int generate_sched_domains(cpumask_var_t **domains,
882833
}
883834
rcu_read_unlock();
884835

885-
/*
886-
* If there are only isolated partitions underneath the cgroup root,
887-
* we can optimize out unneeded sched domains scanning.
888-
*/
889-
if (root_load_balance && (csn == 1))
890-
goto single_root_domain;
891-
892-
for (i = 0; i < csn; i++)
893-
uf_node_init(&csa[i]->node);
894-
895-
/* Merge overlapping cpusets */
896-
for (i = 0; i < csn; i++) {
897-
for (j = i + 1; j < csn; j++) {
898-
if (cpusets_overlap(csa[i], csa[j])) {
836+
for (i = 0; i < ndoms; i++) {
837+
for (j = i + 1; j < ndoms; j++) {
838+
if (cpusets_overlap(csa[i], csa[j]))
899839
/*
900840
* Cgroup v2 shouldn't pass down overlapping
901841
* partition root cpusets.
902842
*/
903-
WARN_ON_ONCE(cgrpv2);
904-
uf_union(&csa[i]->node, &csa[j]->node);
905-
}
843+
WARN_ON_ONCE(1);
906844
}
907845
}
908846

909-
/* Count the total number of domains */
910-
for (i = 0; i < csn; i++) {
911-
if (uf_find(&csa[i]->node) == &csa[i]->node)
912-
ndoms++;
913-
}
914-
915-
/*
916-
* Now we know how many domains to create.
917-
* Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
918-
*/
847+
generate_doms:
919848
doms = alloc_sched_domains(ndoms);
920849
if (!doms)
921850
goto done;
@@ -932,45 +861,19 @@ static int generate_sched_domains(cpumask_var_t **domains,
932861
* to SD_ATTR_INIT. Also non-isolating partition root CPUs are a
933862
* subset of HK_TYPE_DOMAIN housekeeping CPUs.
934863
*/
935-
if (cgrpv2) {
936-
for (i = 0; i < ndoms; i++) {
937-
/*
938-
* The top cpuset may contain some boot time isolated
939-
* CPUs that need to be excluded from the sched domain.
940-
*/
941-
if (csa[i] == &top_cpuset)
942-
cpumask_and(doms[i], csa[i]->effective_cpus,
943-
housekeeping_cpumask(HK_TYPE_DOMAIN));
944-
else
945-
cpumask_copy(doms[i], csa[i]->effective_cpus);
946-
if (dattr)
947-
dattr[i] = SD_ATTR_INIT;
948-
}
949-
goto done;
950-
}
951-
952-
for (nslot = 0, i = 0; i < csn; i++) {
953-
nslot_update = 0;
954-
for (j = i; j < csn; j++) {
955-
if (uf_find(&csa[j]->node) == &csa[i]->node) {
956-
struct cpumask *dp = doms[nslot];
957-
958-
if (i == j) {
959-
nslot_update = 1;
960-
cpumask_clear(dp);
961-
if (dattr)
962-
*(dattr + nslot) = SD_ATTR_INIT;
963-
}
964-
cpumask_or(dp, dp, csa[j]->effective_cpus);
965-
cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
966-
if (dattr)
967-
update_domain_attr_tree(dattr + nslot, csa[j]);
968-
}
969-
}
970-
if (nslot_update)
971-
nslot++;
864+
for (i = 0; i < ndoms; i++) {
865+
/*
866+
* The top cpuset may contain some boot time isolated
867+
* CPUs that need to be excluded from the sched domain.
868+
*/
869+
if (!csa || csa[i] == &top_cpuset)
870+
cpumask_and(doms[i], top_cpuset.effective_cpus,
871+
housekeeping_cpumask(HK_TYPE_DOMAIN));
872+
else
873+
cpumask_copy(doms[i], csa[i]->effective_cpus);
874+
if (dattr)
875+
dattr[i] = SD_ATTR_INIT;
972876
}
973-
BUG_ON(nslot != ndoms);
974877

975878
done:
976879
kfree(csa);

0 commit comments

Comments
 (0)