File size: 10,510 Bytes
9d54b72
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
import ast
import struct
from enum import Enum
from pathlib import Path
from typing import IO
from zipfile import ZipFile

import numexpr as ne
import numpy
import numpy as np

PYTHON_ENDIANNESS = 'big'
NUMPY_FLOAT_DTYPE = ">f8"


class SpecialLogFileType(Enum):
    IDA_DATA_IDENTIFIER = "IDAData logfile"
    ODE_DATA_IDENTIFIER = "ODEData logfile"
    NETCDF_DATA_IDENTIFIER = "NetCDFData logfile"
    MOVINGBOUNDARY_DATA_IDENTIFIER = "MBSData"
    COMSOLE_DATA_IDENTIFIER = "COMSOL"

    @staticmethod
    def from_string(s: str):
        for special_log_file_type in SpecialLogFileType:
            if s == special_log_file_type.value:
                return special_log_file_type
        return None


class DomainType(Enum):
    POSTPROCESSING = "PostProcessing"
    UNKNOWN = "Unknown"
    VOLUME = "Volume"
    MEMBRANE = "Membrane"
    CONTOUR = "Contour"
    NONSPATIAL = "Nonspatial"
    POINT = "Point"


class VariableType(Enum):
    UNKNOWN = 0
    VOLUME = 1
    MEMBRANE = 2
    CONTOUR = 3
    VOLUME_REGION = 4
    MEMBRANE_REGION = 5
    CONTOUR_REGION = 6
    NONSPATIAL = 7
    VOLUME_PARTICLE = 8
    MEMBRANE_PARTICLE = 9
    POINT_VARIABLE = 10
    POSTPROCESSING = 999

    @staticmethod
    def from_string(s: str):
        switcher = {
            "Unknown": VariableType.UNKNOWN,
            "Volume_VariableType": VariableType.VOLUME,
            "Membrane_VariableType": VariableType.MEMBRANE,
            "Contour_VariableType": VariableType.CONTOUR,
            "Volume_Region_VariableType": VariableType.VOLUME_REGION,
            "Membrane_Region_VariableType": VariableType.MEMBRANE_REGION,
            "Contour_Region_VariableType": VariableType.CONTOUR_REGION,
            "Nonspatial_VariableType": VariableType.NONSPATIAL,
            "Volume_Particle_VariableType": VariableType.VOLUME_PARTICLE,
            "Membrane_Particle_VariableType": VariableType.MEMBRANE_PARTICLE,
            "Point_Variable_VariableType": VariableType.POINT_VARIABLE,
            "PostProcessing_VariableType": VariableType.POSTPROCESSING
        }
        return switcher.get(s, VariableType.UNKNOWN)


class DataFileHeader:
    magic_string: str
    version_string: str
    num_blocks: int
    first_block_offset: int
    sizeX: int
    sizeY: int
    sizeZ: int

    def read(self, f: IO[bytes]) -> int:
        read_count = 0
        self.magic_string = f.read(16).decode('utf-8').split('\x00')[0]
        read_count += 16
        self.version_string = f.read(8).decode('utf-8').split('\x00')[0]
        read_count += 8
        self.num_blocks = int.from_bytes(f.read(4), byteorder=PYTHON_ENDIANNESS)
        read_count += 4
        self.first_block_offset = int.from_bytes(f.read(4), byteorder=PYTHON_ENDIANNESS)
        read_count += 4
        self.sizeX = int.from_bytes(f.read(4), byteorder=PYTHON_ENDIANNESS)
        read_count += 4
        self.sizeY = int.from_bytes(f.read(4), byteorder=PYTHON_ENDIANNESS)
        read_count += 4
        self.sizeZ = int.from_bytes(f.read(4), byteorder=PYTHON_ENDIANNESS)
        read_count += 4

        return read_count


class VariableInfo:
    var_name: str
    variable_type: VariableType


class DataBlockHeader:
    var_name: VariableInfo
    variable_type: VariableType
    size: int
    data_offset: int

    def read(self, f: IO[bytes]) -> int:
        read_count = 0
        self.var_name: str = f.read(124).decode('utf-8').split('\x00')[0]
        read_count += 124
        self.variable_type = VariableType(int.from_bytes(f.read(4), byteorder=PYTHON_ENDIANNESS))
        read_count += 4
        self.size = int.from_bytes(f.read(4), byteorder=PYTHON_ENDIANNESS)
        read_count += 4
        self.data_offset = int.from_bytes(f.read(4), byteorder=PYTHON_ENDIANNESS)
        read_count += 4
        return read_count


class DataZipFileMetadata:
    zip_file: Path
    zip_entry: str
    file_header: DataFileHeader
    data_blocks: list[DataBlockHeader]

    # constructor
    def __init__(self, zip_file: Path, zip_entry: str) -> None:
        self.zip_file = zip_file
        self.zip_entry = zip_entry

    def read(self) -> None:
        with ZipFile(self.zip_file, 'r') as zip:
            with zip.open(self.zip_entry) as f:
                self.file_header = DataFileHeader()
                self.file_header.read(f)
                blocks = []
                for _ in range(self.file_header.num_blocks):
                    data_block = DataBlockHeader()
                    data_block.read(f)
                    blocks.append(data_block)
                self.data_blocks = blocks

    def get_data_block_header(self, variable) -> DataBlockHeader:
        for db in self.data_blocks:
            if db.var_name == variable:
                return db
        raise ValueError(f"Variable {variable} not found in zip entry {self.zip_entry}")


class PdeDataSet:
    base_dir: Path
    log_filename: str
    data_filenames: list[str]
    zip_filenames: list[str]
    data_times: list[float]
    data_zip_file_metadata: dict[float, DataZipFileMetadata]

    def __init__(self, base_dir: Path, log_filename: str) -> None:
        self.base_dir = base_dir
        self.log_filename = log_filename
        self.data_filenames = []
        self.zip_filenames = []
        self.data_times = []
        self.data_zip_file_metadata = {}

    def read(self) -> None:
        log_file: Path = self.base_dir / self.log_filename
        with log_file.open('r') as f:
            first_line = True
            for line in f:
                if first_line:
                    # if line starts with a string from SpecialLogFileType, then it is not a standard PDE log file
                    if SpecialLogFileType.from_string(line):
                        special_log_file_type = SpecialLogFileType.from_string(line)
                        raise NotImplementedError(f"Special log file type {special_log_file_type} not implemented")
                    first_line = False
                _iteration, filename, zip_filename, time_str = line.split()
                self.data_filenames.append(filename)
                self.zip_filenames.append(zip_filename)
                self.data_times.append(float(time_str))

    def times(self) -> list[float]:
        return self.data_times

    def time_index(self, time: float) -> int:
        return self.data_times.index(time)

    def first_data_zip_file_metadata(self) -> DataZipFileMetadata:
        first_zip_entry = self.data_zip_file_metadata.get(0.0)
        if first_zip_entry is None:
            first_zip_entry = DataZipFileMetadata(self.base_dir / self.zip_filenames[0], self.data_filenames[0])
            first_zip_entry.read()
        return first_zip_entry

    def variables_block_headers(self) -> list[DataBlockHeader]:
        first_zip_entry = self.first_data_zip_file_metadata()
        if first_zip_entry is None:
            return []
        return [db for db in first_zip_entry.data_blocks]

    def _get_data_zip_file_metadata(self, time: float) -> DataZipFileMetadata:
        zip_entry = self.data_zip_file_metadata.get(time)
        if zip_entry is None:
            time_index = self.time_index(time)
            zip_file_path = self.base_dir / self.zip_filenames[time_index]
            zip_entry = DataZipFileMetadata(zip_file_path, self.data_filenames[time_index])
            zip_entry.read()
            self.data_zip_file_metadata[time] = zip_entry
        return zip_entry

    def get_data(self, variable: str, time: float) -> numpy.ndarray:
        zip_file_entry: DataZipFileMetadata = self._get_data_zip_file_metadata(time)
        data_block_header: DataBlockHeader = zip_file_entry.get_data_block_header(variable)

        with (ZipFile(zip_file_entry.zip_file, 'r') as zip):
            with zip.open(zip_file_entry.zip_entry, mode='r') as f:
                f.seek(data_block_header.data_offset)
                buffer = bytearray(0)
                bytes_left_to_read = data_block_header.size * 8
                while bytes_left_to_read > 0:
                    bytes_read = f.read(bytes_left_to_read)
                    buffer.extend(bytes_read)
                    bytes_left_to_read -= len(bytes_read)
                array = np.frombuffer(buffer, dtype=NUMPY_FLOAT_DTYPE)
                return array


class NamedFunction:
    name: str
    vcell_expression: str
    python_expression: str
    variables: list[str]
    variable_type: VariableType

    def __init__(self, name: str, vcell_expression: str, variable_type: VariableType) -> None:
        self.name = name
        self.vcell_expression = vcell_expression
        self.python_expression = vcell_expression.replace("^", "**").lstrip(" ").rstrip(" ")
        self.variable_type = variable_type

        # Parse the python expression into an AST and extract all Name nodes (which represent variables)
        tree = ast.parse(self.python_expression)
        self.variables = [node.id for node in ast.walk(tree) if isinstance(node, ast.Name)]

    def evaluate(self, variable_bindings: dict[str, np.ndarray]) -> np.ndarray:
        ne.set_num_threads(1)
        expression = self.python_expression
        return ne.evaluate(expression, local_dict=variable_bindings)

    def __str__(self):
        return f"NamedFunction(name={self.name}, vcell_expression={self.vcell_expression}, python_expression={self.vcell_expression}, variable_type={self.variable_type}, variables={self.variables}"


class DataFunctions:
    function_file: Path
    named_functions: list[NamedFunction]

    def __init__(self, function_file: Path) -> None:
        self.function_file = function_file
        self.named_functions = []

    def read(self) -> None:
        with self.function_file.open('r') as f:
            # skip lines beginning with # and blank lines
            for line in f:
                if line.startswith('#') or line.isspace():
                    continue
                # read each named function from one line
                # example line: "cytosol::J_r0; (RanC_cyt - (1000.0 * C_cyt * Ran_cyt)); ; Volume_VariableType; false"
                parts = line.split(';')
                name = parts[0].strip(" ")
                expression = parts[1].strip(" ")
                _unknown_skipped = parts[2]
                variable_type = VariableType.from_string(parts[3].strip(" "))
                _boolean_skipped = parts[4]
                function = NamedFunction(name=name, vcell_expression=expression, variable_type=variable_type)
                self.named_functions.append(function)