@@ -1,12 +1,14 @@
# %% Import packages

import itertools
+ import math
from pathlib import Path
from bfio import BioReader
import numpy as np
from cloudvolume import CloudVolume
- from cloudvolume.lib import touch
+ from cloudvolume.lib import touch, Vec
import json
+ from neuroglancer.downsample import downsample_with_averaging
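+ # downsample_with_averaging mean-pools a block by the given per-axis factors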

# %% Define the path to the files
@@ -54,8 +56,8 @@

num_channels = br.shape[-1]
data_type = "uint16"
- chunk_size = [256, 256, 128, 1]
- volume_size = [br.shape[1], br.shape[0], br.shape[2]]  # XYZ
+ chunk_size = [256, 256, 128]
+ volume_size = [br.shape[1], br.shape[0], br.shape[2]]  # XYZ

# %% Setup the cloudvolume info
info = CloudVolume.create_new_info(
@@ -65,43 +67,41 @@
    encoding="raw",
    resolution=[size_x, size_y, size_z],
    voxel_offset=[0, 0, 0],
-     chunk_size=chunk_size[:-1],
+     chunk_size=chunk_size,
    volume_size=volume_size,
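+     # max_mip=2 with factor=Vec(2, 2, 2) registers three scales (mips 0-2),
+     # each downsampled 2x in x, y, and z from the one above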
+     max_mip=2,
+     factor=Vec(2, 2, 2),
)
- vol = CloudVolume("file://" + str(OUTPUT_PATH), info=info)
- vol.provenance.description = "Example data conversion"
- vol.commit_info()
- vol.commit_provenance()
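+ # One handle per mip level, since a CloudVolume instance reads and writes a
+ # single mip; info is passed so the handles work before commit_info() runs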
+ vols = [CloudVolume("file://" + str(OUTPUT_PATH), mip=i, info=info) for i in range(3)]
+ vols[0].provenance.description = "Example data conversion"
+ vols[0].commit_info()
+ vols[0].commit_provenance()

# %% Setup somewhere to hold progress
progress_dir = OUTPUT_PATH / "progress"
progress_dir.mkdir(exist_ok=True)


# %% Functions for moving data
- shape = np.array([br.shape[1], br.shape[0], br.shape[2], br.shape[3]])
- chunk_shape = np.array([1024, 1024, 512, 1])  # this is for reading data
+ shape = np.array([br.shape[1], br.shape[0], br.shape[2]])
+ chunk_shape = np.array([1024, 1024, 512])  # this is for reading data
num_chunks_per_dim = np.ceil(shape / chunk_shape).astype(int)
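+ # NB: each read chunk is a multiple of the 256 x 256 x 128 write chunk, so
+ # writes start on chunk boundaries at mip 0 and stay aligned after downsampling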


- def chunked_reader(x_i, y_i, z_i, c):
+ def chunked_reader(x_i, y_i, z_i):
    x_start, x_end = x_i * chunk_shape[0], min((x_i + 1) * chunk_shape[0], shape[0])
    y_start, y_end = y_i * chunk_shape[1], min((y_i + 1) * chunk_shape[1], shape[1])
    z_start, z_end = z_i * chunk_shape[2], min((z_i + 1) * chunk_shape[2], shape[2])

    # Read the chunk from the BioReader
-     chunk = br.read(
-         X=(x_start, x_end), Y=(y_start, y_end), Z=(z_start, z_end), C=(c,)
-     )
-     # Keep expanding dims until it is the same length as chunk_shape
-     while len(chunk.shape) < len(chunk_shape):
-         chunk = np.expand_dims(chunk, axis=-1)
+     chunk = br.read(X=(x_start, x_end), Y=(y_start, y_end), Z=(z_start, z_end))
+
    # bfio reads in (Y, X, Z) order, so swap to the (X, Y, Z) order CloudVolume expects
    return chunk.swapaxes(0, 1)


def process(args):
-     x_i, y_i, z_i, c = args
+     x_i, y_i, z_i = args
    start = [x_i * chunk_shape[0], y_i * chunk_shape[1], z_i * chunk_shape[2]]
    end = [
        min((x_i + 1) * chunk_shape[0], shape[0]),
@@ -110,29 +110,41 @@ def process(args):
    ]
    f_name = (
        progress_dir
-         / f"{start[0]}-{end[0]}_{start[1]}-{end[1]}_{start[2]}-{end[2]}_{c}.done"
+         / f"{start[0]}-{end[0]}_{start[1]}-{end[1]}_{start[2]}-{end[2]}.done"
    )
    if f_name.exists() and not OVERWRITE:
        return
    print("Working on", f_name)
-     rawdata = chunk = chunked_reader(x_i, y_i, z_i, c)
-     vol[start[0] : end[0], start[1] : end[1], start[2] : end[2], c] = rawdata
+     rawdata = chunked_reader(x_i, y_i, z_i)
+     print(rawdata.shape)
+     for mip_level in reversed(range(3)):
+         if mip_level == 0:
+             downsampled = rawdata
+             ds_start = start
+             ds_end = end
+         else:
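+             # NB: 2 * mip_level happens to equal the true per-axis scale
+             # 2 ** mip_level for mips 1 and 2, the only levels used here;
+             # a deeper pyramid would need 2 ** mip_level instead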
+             downsampled = downsample_with_averaging(
+                 rawdata, [2 * mip_level, 2 * mip_level, 2 * mip_level, 1]
+             )
+             ds_start = [int(math.ceil(s / (2 * mip_level))) for s in start]
+             ds_end = [int(math.ceil(e / (2 * mip_level))) for e in end]
+
+         vols[mip_level][
+             ds_start[0] : ds_end[0], ds_start[1] : ds_end[1], ds_start[2] : ds_end[2]
+         ] = downsampled
    touch(f_name)


# %% Try with a single chunk to see if it works
# x_i, y_i, z_i = 0, 0, 0
- # process((x_i, y_i, z_i, 0))
+ # process((x_i, y_i, z_i))

- # %% Can't figure out the writing so do it with fake data
- # fake_data = np.random.randint(0, 2**16, size=chunk_size, dtype=np.uint16)
- # vol[0:256, 0:256, 0:128, 0] = fake_data
+ # %% Loop over all the chunks

coords = itertools.product(
    range(num_chunks_per_dim[0]),
    range(num_chunks_per_dim[1]),
    range(num_chunks_per_dim[2]),
-     range(num_channels),
)
# Do it in reverse order because the last chunks are most likely to error
reversed_coords = list(coords)
@@ -149,4 +161,6 @@ def process(args):
    process(coord)

# %% Serve the dataset to be used in neuroglancer
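+ # viewer() serves the precomputed data over HTTP on localhost; point
+ # neuroglancer at a precomputed://http://localhost:1337 source to browse it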
- vol.viewer(port=1337)
+ vols[0].viewer(port=1337)
+
+ # %%