-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathjson.py
More file actions
336 lines (272 loc) · 13 KB
/
json.py
File metadata and controls
336 lines (272 loc) · 13 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
import json
import os
import tempfile
from pathlib import Path
from typing import List, Optional
from ..tables import Column, ColumnType, ITablesSnapshot, Table
def _atomic_write_text(path: Path, content: str) -> None:
"""Write content to a file atomically using write-to-temp + os.replace."""
fd, tmp_path = tempfile.mkstemp(dir=path.parent, suffix=".tmp")
try:
with os.fdopen(fd, "w", encoding="utf-8") as f:
f.write(content)
f.flush()
os.fsync(f.fileno())
os.replace(tmp_path, path)
except BaseException:
try:
os.unlink(tmp_path)
except OSError:
pass
raise
class FileSystemJsonTables(ITablesSnapshot):
    """Tables snapshot backed by JSON files inside *workdir*.

    On-disk layout:
      - ``__schema__.json`` maps ``table_id`` -> ``{"table_name": ..., "columns": [...]}``
      - ``<table_id>.json`` holds that table's rows as a JSON list whose keys
        are column *IDs* (not names), so renaming tables/columns never has to
        rewrite the data files.

    All writes go through ``_atomic_write_text`` so concurrent readers never
    see a half-written file. All reads use UTF-8 explicitly to match the
    writer (previously ``read_text()`` used the locale default, which could
    fail to round-trip on non-UTF-8 systems).
    """

    workdir: Path  # directory containing the schema file and all table files

    def __init__(self, workdir: Path):
        self.workdir = workdir
        self._ensure_metadata_table()

    # ------------------------------------------------------------------
    # Schema (metadata) helpers
    # ------------------------------------------------------------------

    def _schema_path(self) -> Path:
        """Path of the shared schema file."""
        return self.workdir / "__schema__.json"

    def _ensure_metadata_table(self) -> None:
        """Create an empty schema file on first use."""
        schema_path = self._schema_path()
        if not schema_path.exists():
            _atomic_write_text(schema_path, json.dumps({}))

    def _load_metadata(self) -> dict:
        """Load the full schema mapping (table_id -> table info)."""
        return json.loads(self._schema_path().read_text(encoding="utf-8"))

    def _get_table_metadata_by_name(
        self, table_name: str
    ) -> tuple[Optional[str], Optional[List[Column]]]:
        """Return ``(table_id, columns)`` for *table_name*, or ``(None, None)``."""
        for table_id, table_info in self._load_metadata().items():
            if table_info.get("table_name") == table_name:
                columns = [
                    Column.from_dict(c) for c in table_info.get("columns", [])
                ]
                return table_id, columns
        return None, None

    def _get_table_metadata_by_id(
        self, table_id: str
    ) -> tuple[Optional[str], Optional[List[Column]]]:
        """Return ``(table_name, columns)`` for *table_id*, or ``(None, None)``."""
        table_info = self._load_metadata().get(table_id)
        if table_info:
            columns = [
                Column.from_dict(c) for c in table_info.get("columns", [])
            ]
            return table_info.get("table_name"), columns
        return None, None

    def _save_table_metadata(
        self, table_id: str, table_name: str, columns: List[Column]
    ) -> None:
        """Persist name + columns for *table_id* into the schema file."""
        metadata = self._load_metadata()
        metadata[table_id] = {
            "table_name": table_name,
            "columns": [col.to_dict() for col in columns],
        }
        _atomic_write_text(self._schema_path(), json.dumps(metadata, indent=2))

    def _remove_table_metadata(self, table_id: str) -> None:
        """Drop *table_id* from the schema file (no-op if absent)."""
        metadata = self._load_metadata()
        if table_id in metadata:
            del metadata[table_id]
            _atomic_write_text(
                self._schema_path(), json.dumps(metadata, indent=2)
            )

    # ------------------------------------------------------------------
    # Data-file helpers (shared by the mutating methods below)
    # ------------------------------------------------------------------

    def _require_table(self, table_name: str) -> tuple[str, List[Column], Path]:
        """Resolve *table_name* to ``(table_id, columns, data-file path)``.

        Raises ValueError if the name is unknown and FileNotFoundError if its
        data file is missing — the same exceptions the callers raised before
        this logic was factored out.
        """
        table_id, columns = self._get_table_metadata_by_name(table_name)
        if table_id is None:
            raise ValueError(f"Table {table_name} not found")
        table_path = self.workdir / f"{table_id}.json"
        if not table_path.exists():
            raise FileNotFoundError(f"File {table_path} does not exist")
        return table_id, columns, table_path

    def _load_rows(self, table_path: Path) -> list:
        """Load a table's row list, asserting the payload is a JSON list."""
        rows = json.loads(table_path.read_text(encoding="utf-8"))
        assert isinstance(rows, list), (
            f"File {table_path} does not contain a list of rows"
        )
        return rows

    def _save_rows(self, table_path: Path, rows: list) -> None:
        """Atomically write a table's row list back to disk."""
        _atomic_write_text(table_path, json.dumps(rows, indent=2))

    # ------------------------------------------------------------------
    # Table-level operations
    # ------------------------------------------------------------------

    def get_table(self, name: str) -> Optional[Table]:
        """Load *name* as a ``Table`` whose rows are keyed by column names.

        Raises FileNotFoundError when the table or its data file is missing.
        """
        table_id, columns = self._get_table_metadata_by_name(name)
        if table_id is None:
            raise FileNotFoundError(f"Table {name} not found")
        table_path = self.workdir / f"{table_id}.json"
        if not table_path.exists():
            raise FileNotFoundError(f"File {table_path} does not exist")
        rows = json.loads(table_path.read_text(encoding="utf-8"))
        if not columns:
            # Fallback: infer columns from the data when metadata is empty.
            # A seen-name set replaces the previous per-key list rebuild
            # (accidental O(n^2)) and avoids requiring Column to be hashable.
            # NOTE(review): row keys here are column IDs, so the inferred
            # column names equal those IDs — confirm this matches the intent.
            columns = []
            seen_names: set = set()
            for row in rows:
                assert isinstance(row, dict), f"Row {row} is not a dictionary"
                for key, value in row.items():
                    if key not in seen_names:
                        seen_names.add(key)
                        columns.append(
                            Column(name=key, schema=ColumnType.from_value(value))
                        )
            # Persist the inferred schema so later calls skip inference.
            self._save_table_metadata(table_id, name, columns)
        # Throwaway table used only for its id -> name row conversion.
        temp_table = Table(name=name, columns=columns, data=[], table_id=table_id)
        converted_data = [
            temp_table.convert_row_from_column_ids(row) for row in rows
        ]
        return Table(
            name=name, columns=columns, data=converted_data, table_id=table_id
        )

    def add_table(self, table: Table) -> None:
        """Create a new table file + metadata; reject duplicate name or id."""
        existing_id, _ = self._get_table_metadata_by_name(table.name)
        if existing_id is not None:
            raise ValueError(f"Table {table.name} already exists")
        table_path = self.workdir / f"{table.table_id}.json"
        if table_path.exists():
            raise ValueError(f"Table with ID {table.table_id} already exists")
        # Store rows keyed by column IDs so later renames are metadata-only.
        data_with_ids = [
            table.convert_row_to_column_ids(row) for row in table.data
        ]
        self._save_rows(table_path, data_with_ids)
        self._save_table_metadata(table.table_id, table.name, table.columns)

    def remove_table(self, name: str) -> None:
        """Delete the table's data file and its schema entry."""
        table_id, _, table_path = self._require_table(name)
        table_path.unlink()
        self._remove_table_metadata(table_id)

    def rename_table(self, old_name: str, new_name: str) -> None:
        """Rename a table; data files are untouched (they are keyed by id)."""
        table_id, columns = self._get_table_metadata_by_name(old_name)
        if table_id is None:
            raise ValueError(f"Table {old_name} not found")
        existing_id, _ = self._get_table_metadata_by_name(new_name)
        if existing_id is not None:
            raise ValueError(f"Table {new_name} already exists")
        self._save_table_metadata(table_id, new_name, columns)

    # ------------------------------------------------------------------
    # Row / column operations
    # ------------------------------------------------------------------

    def _insert(self, table_name: str, row: dict) -> None:
        """Append *row* (name-keyed) to the table, stored keyed by column IDs."""
        table_id, columns, table_path = self._require_table(table_name)
        temp_table = Table(
            name=table_name, columns=columns, data=[], table_id=table_id
        )
        rows = self._load_rows(table_path)
        rows.append(temp_table.convert_row_to_column_ids(row))
        self._save_rows(table_path, rows)

    def add_column(self, table_name: str, column: Column) -> None:
        """Add *column*, backfilling its default value into every row."""
        table_id, existing_columns, table_path = self._require_table(table_name)
        rows = self._load_rows(table_path)
        if any(col.name == column.name for col in existing_columns):
            raise ValueError(
                f"Column {column.name} already exists in table {table_name}"
            )
        for row in rows:
            row[column.column_id] = column.default
        self._save_rows(table_path, rows)
        existing_columns.append(column)
        self._save_table_metadata(table_id, table_name, existing_columns)

    def remove_column(self, table_name: str, column_name: str) -> None:
        """Remove *column_name* from data and metadata.

        If no column with that name exists the data file is left alone and
        the (unchanged) metadata is rewritten, matching previous behavior.
        """
        table_id, columns, table_path = self._require_table(table_name)
        rows = self._load_rows(table_path)
        column_to_remove = next(
            (col for col in columns if col.name == column_name), None
        )
        if column_to_remove is not None:
            for row in rows:
                # pop with default == "del if present"
                row.pop(column_to_remove.column_id, None)
            self._save_rows(table_path, rows)
        columns = [col for col in columns if col.name != column_name]
        self._save_table_metadata(table_id, table_name, columns)

    def rename_column(self, table_name: str, old_name: str, new_name: str) -> None:
        """Rename a column; only metadata changes (data is keyed by column IDs)."""
        table_id, columns, table_path = self._require_table(table_name)
        # Kept from the original: validate the data file is a JSON list even
        # though its contents are not modified here.
        self._load_rows(table_path)
        for col in columns:
            if col.name == old_name:
                col.name = new_name
        self._save_table_metadata(table_id, table_name, columns)

    def change_column_type(
        self, table_name: str, column_name: str, new_type: ColumnType
    ) -> None:
        """Change a column's declared type; stored values are left as-is."""
        table_id, columns = self._get_table_metadata_by_name(table_name)
        if table_id is None:
            raise ValueError(f"Table {table_name} not found")
        for col in columns:
            if col.name == column_name:
                col.schema = new_type
                break
        else:
            raise ValueError(
                f"Column {column_name} not found in table {table_name}"
            )
        self._save_table_metadata(table_id, table_name, columns)

    def _update(self, table_name: str, idx: int, changes: dict) -> None:
        """Apply *changes* (name-keyed) to the row at position *idx*.

        Raises IndexError when *idx* is out of range.
        """
        table_id, columns, table_path = self._require_table(table_name)
        temp_table = Table(
            name=table_name, columns=columns, data=[], table_id=table_id
        )
        rows = self._load_rows(table_path)
        if idx < 0 or idx >= len(rows):
            raise IndexError(f"Index {idx} out of range for table {table_name}")
        rows[idx].update(temp_table.convert_row_to_column_ids(changes))
        self._save_rows(table_path, rows)

    def _delete(self, table_name: str, idxs: List[int]) -> None:
        """Delete the rows at positions *idxs*.

        Indices are processed in descending order so earlier deletions do not
        shift the positions of later ones. Raises IndexError on any
        out-of-range index.
        """
        _, _, table_path = self._require_table(table_name)
        rows = self._load_rows(table_path)
        for idx in sorted(idxs, reverse=True):
            if idx < 0 or idx >= len(rows):
                raise IndexError(
                    f"Index {idx} out of range for table {table_name}"
                )
            del rows[idx]
        self._save_rows(table_path, rows)