Skip to content

Commit 4356a5a

Browse files
committed
fixed rna channel label typo
1 parent 18c1e96 commit 4356a5a

2 files changed

Lines changed: 38 additions & 39 deletions

File tree

src/xenium_analysis_tools/utils/plot_utils.py

Lines changed: 0 additions & 17 deletions
Original file line number | Diff line number | Diff line change
@@ -18,20 +18,3 @@ def get_vals_perc(img, chan, vmin_val=None, vmax_val=None, vmin_perc=None, vmax_
1818

1919
return vmin, vmax
2020

21-
def get_channel_name(chan, print_chan_names_only=False):
22-
channel_aliases = {'DAPI': ['dapi','nuclear'],
23-
'ATP1A1/CD45/E-Cadherin': ['boundary'],
24-
'18S': ['rna, RNA'],
25-
'AlphaSMA/Vimentin': ['protein']
26-
}
27-
if print_chan_names_only:
28-
chan_names = sd.models.get_channel_names(section_sdata[image_name])
29-
print('Available channel names:')
30-
for name in chan_names:
31-
print(f' - {name}')
32-
return None
33-
for chan_label, aliases in channel_aliases.items():
34-
for alias in aliases:
35-
if alias.lower() in chan.lower():
36-
return chan_label
37-
return chan

src/xenium_analysis_tools/utils/sd_utils.py

Lines changed: 38 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -6,6 +6,24 @@
66
import json
77
from pathlib import Path
88

9+
def get_channel_name(image, chan, print_chan_names_only=False):
10+
channel_aliases = {'DAPI': ['dapi','nuclear'],
11+
'ATP1A1/CD45/E-Cadherin': ['boundary'],
12+
'18S': ['rna', 'RNA'],
13+
'AlphaSMA/Vimentin': ['protein']
14+
}
15+
if print_chan_names_only:
16+
chan_names = sd.models.get_channel_names(image)
17+
print('Available channel names:')
18+
for name in chan_names:
19+
print(f' - {name}')
20+
return None
21+
for chan_label, aliases in channel_aliases.items():
22+
for alias in aliases:
23+
if alias.lower() in chan.lower():
24+
return chan_label
25+
return chan
26+
927
def get_dataset_paths(dataset_id,
1028
data_root=Path('/root/capsule/data'),
1129
scratch_root=Path('/root/capsule/scratch'),
@@ -99,46 +117,44 @@ def add_micron_coord_sys(sdata, pixel_size=None, z_step=None):
99117
)
100118
return sdata
101119

102-
def add_mapped_cells_cols(sdata, mapped_h5ad_path):
120+
def add_mapped_cells_cols(adata, mapped_adata):
103121
import scanpy as sc
104-
mapped_h5ad = sc.read_h5ad(mapped_h5ad_path)
105-
mapping_obs_cols = np.setdiff1d(mapped_h5ad.obs.columns, sdata['table'].obs.columns)
122+
mapping_obs_cols = np.setdiff1d(mapped_adata.obs.columns, adata.obs.columns)
106123
if len(mapping_obs_cols) == 0:
107124
print("No new columns to add from mapped data")
108125
else:
109126
print(f"Adding {len(mapping_obs_cols)} columns from mapped data: {mapping_obs_cols}")
110-
sdata['table'].obs = sdata['table'].obs.merge(
111-
mapped_h5ad.obs[mapping_obs_cols],
127+
adata.obs = adata.obs.merge(
128+
mapped_adata.obs[mapping_obs_cols],
112129
left_index=True,
113130
right_index=True,
114131
how='outer'
115132
)
116-
mapping_vars_cols = np.setdiff1d(mapped_h5ad.var.columns, sdata['table'].var.columns)
133+
mapping_vars_cols = np.setdiff1d(mapped_adata.var.columns, adata.var.columns)
117134
if len(mapping_vars_cols) == 0:
118135
print("No new columns to add from mapped data")
119136
else:
120137
print(f"Adding {len(mapping_vars_cols)} columns from mapped data: {mapping_vars_cols}")
121-
sdata['table'].var = sdata['table'].var.merge(
122-
mapped_h5ad.var[mapping_vars_cols],
138+
adata.var = adata.var.merge(
139+
mapped_adata.var[mapping_vars_cols],
123140
left_index=True,
124141
right_index=True,
125142
how='outer'
126143
)
127-
return sdata
144+
return adata
128145

129-
def add_type_id_columns(sdata, col_name, table_name='table'):
130-
if col_name in sdata[table_name].obs.columns:
146+
def add_type_id_columns(adata, col_name):
147+
if col_name in adata.obs.columns:
131148
col_id = col_name.replace('name', 'id')
132-
sdata[table_name].obs[col_id] = sdata[table_name].obs[col_name].str.split(' ').str[0].astype('int')
149+
adata.obs[col_id] = adata.obs[col_name].str.split(' ').str[0].astype('int')
133150
print(f"Added {col_id} column")
134151
else:
135-
print(f"{col_name} column not found in {table_name}.obs")
136-
return sdata
152+
print(f"{col_name} column not found in adata.obs")
153+
return adata
137154

138-
def add_grouped_types_columns(sdata,
155+
def add_grouped_types_columns(adata,
139156
new_col,
140157
type_mappings=None,
141-
table_name='table',
142158
null_value='other'):
143159
default_mappings = {
144160
'broad_class': {
@@ -178,14 +194,14 @@ def add_grouped_types_columns(sdata,
178194
norm_mappings[crit_col] = norm
179195

180196
# Initialize column
181-
print(f"Adding '{new_col}' to {table_name}.obs")
182-
sdata[table_name].obs[new_col] = null_value
197+
print(f"Adding '{new_col}' to adata.obs")
198+
adata.obs[new_col] = null_value
183199

184200
for crit_col, rules in norm_mappings.items():
185-
if crit_col not in sdata[table_name].obs.columns:
201+
if crit_col not in adata.obs.columns:
186202
# skip missing criteria columns
187203
continue
188-
series = sdata[table_name].obs[crit_col]
204+
series = adata.obs[crit_col]
189205

190206
for rule in rules:
191207
op = rule['op']
@@ -226,9 +242,9 @@ def add_grouped_types_columns(sdata,
226242
# on any evaluation error, skip this rule
227243
continue
228244

229-
sdata[table_name].obs.loc[mask, new_col] = assign
245+
adata.obs.loc[mask, new_col] = assign
230246

231-
return sdata
247+
return adata
232248

233249
def get_transcripts_bboxes(transcripts, id_col='cell_labels'):
234250
transcripts = transcripts.compute() if hasattr(transcripts, 'compute') else transcripts

0 commit comments

Comments (0)