Skip to content

Commit 1a04b1b

Browse files
committed
Merge branch 'develop' into SYNPY-1794
2 parents 303b670 + fe3a0c9 commit 1a04b1b

6 files changed

Lines changed: 45 additions & 28 deletions

File tree

.github/workflows/build.yml

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ jobs:
8484
path: |
8585
${{ steps.get-dependencies.outputs.site_packages_loc }}
8686
${{ steps.get-dependencies.outputs.site_bin_dir }}
87-
key: ${{ runner.os }}-${{ matrix.python }}-build-${{ env.cache-name }}-${{ hashFiles('setup.py') }}-v32
87+
key: ${{ runner.os }}-${{ matrix.python }}-build-${{ env.cache-name }}-${{ hashFiles('setup.py', 'setup.cfg') }}-v33
8888

8989
- name: Install py-dependencies
9090
if: steps.cache-dependencies.outputs.cache-hit != 'true'
@@ -209,6 +209,7 @@ jobs:
209209
210210
# Run only the failed tests
211211
pytest -sv --reruns 3 --cov-append --cov=. --cov-report xml \
212+
--html=integration-test-report.html --self-contained-html \
212213
--junit-xml=test-results.xml \
213214
-n 8 --dist loadscope \
214215
$(cat failed_tests.txt | tr '\n' ' ')
@@ -217,12 +218,13 @@ jobs:
217218
218219
# use loadscope to avoid issues running tests concurrently that share scoped fixtures
219220
pytest -sv --reruns 3 --cov-append --cov=. --cov-report xml \
221+
--html=integration-test-report.html --self-contained-html \
220222
--junit-xml=test-results.xml \
221223
tests/integration -n 8 $IGNORE_FLAGS --dist loadscope
222224
fi
223225
224226
# Execute the CLI tests in a non-dist way because they were causing some test instability when being run concurrently
225-
pytest -sv --reruns 3 --cov-append --cov=. --cov-report xml tests/integration/synapseclient/test_command_line_client.py
227+
pytest -sv --reruns 3 --cov-append --cov=. --cov-report xml --html=cli-test-report.html --self-contained-html tests/integration/synapseclient/test_command_line_client.py
226228
227229
- name: Extract Failed Tests
228230
if: always() && steps.integration_tests.outcome == 'failure'
@@ -292,6 +294,17 @@ jobs:
292294
echo "::error::Integration tests failed after ${{ github.run_attempt }} attempt(s)"
293295
exit 1
294296
297+
- name: Upload integration test HTML report
298+
# make sure report always gets uploaded if the integration tests ran, even if they failed, but skip if the integration tests were skipped
299+
if: always() && steps.integration_tests.outcome != 'skipped'
300+
uses: actions/upload-artifact@v4
301+
with:
302+
name: integration-test-report-${{ matrix.os }}-${{ matrix.python }}
303+
path: |
304+
integration-test-report.html
305+
cli-test-report.html
306+
retention-days: 14
307+
295308
- name: Upload coverage report
296309
id: upload_coverage_report
297310
uses: actions/upload-artifact@v4

setup.cfg

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,7 @@ tests_require =
7272
pytest-rerunfailures~=12.0
7373
func-timeout~=4.3
7474
pytest-cov~=4.1.0
75+
pytest-html~=4.1.0
7576
pandas>=1.5,<3.0
7677

7778
[options.extras_require]
@@ -85,6 +86,7 @@ dev =
8586
pytest-rerunfailures~=12.0
8687
func-timeout~=4.3
8788
pytest-cov~=4.1.0
89+
pytest-html~=4.1.0
8890
black
8991
pre-commit
9092
filelock>=3.20.3
@@ -100,6 +102,7 @@ tests =
100102
pytest-rerunfailures~=12.0
101103
func-timeout~=4.3
102104
pytest-cov~=4.1.0
105+
pytest-html~=4.1.0
103106
pandas>=1.5,<3.0
104107
jsonschema>=4.23.0
105108

tests/integration/conftest.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -238,4 +238,3 @@ def wrap_with_otel(request):
238238
finally:
239239
for processor in active_span_processors:
240240
processor.force_flush()
241-
span.end()

tests/integration/synapseclient/models/async/test_json_schema_async.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -309,7 +309,7 @@ async def _get_derived_keys():
309309

310310
response = await wait_for_condition(
311311
_get_derived_keys,
312-
timeout_seconds=30,
312+
timeout_seconds=60,
313313
poll_interval_seconds=2,
314314
description="schema derived keys to be populated",
315315
)
@@ -373,7 +373,7 @@ async def _validate_invalid():
373373

374374
response = await wait_for_condition(
375375
_validate_invalid,
376-
timeout_seconds=30,
376+
timeout_seconds=60,
377377
poll_interval_seconds=2,
378378
description="schema validation results (invalid) to be available",
379379
)

tests/integration/synapseclient/models/async/test_recordset_async.py

Lines changed: 22 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -637,9 +637,10 @@ async def test_get_validation_results_with_default_location_async(
637637
"expected type: String, found: Null"
638638
in results_df.loc[2, "validation_error_message"]
639639
), f"Row 2 should have null type error, got: {results_df.loc[2, 'validation_error_message']}"
640-
assert "#/name: expected type: String, found: Null" in str(
641-
results_df.loc[2, "all_validation_messages"]
642-
), f"Row 2 all_validation_messages incorrect: {results_df.loc[2, 'all_validation_messages']}"
640+
# TODO: uncomment these assertions after PLFM-9532 is resolved
641+
# assert "#/name: expected type: String, found: Null" in str(
642+
# results_df.loc[2, "all_validation_messages"]
643+
# ), f"Row 2 all_validation_messages incorrect: {results_df.loc[2, 'all_validation_messages']}"
643644

644645
# AND row 3 should be invalid (multiple violations: minLength, maximum, enum)
645646
assert (
@@ -648,18 +649,18 @@ async def test_get_validation_results_with_default_location_async(
648649
assert (
649650
"3 schema violations found" in results_df.loc[3, "validation_error_message"]
650651
), f"Row 3 should have 3 violations, got: {results_df.loc[3, 'validation_error_message']}"
651-
all_msgs_3 = str(results_df.loc[3, "all_validation_messages"])
652-
assert (
653-
"#/name: expected minLength: 3, actual: 2" in all_msgs_3
654-
), f"Row 3 should have minLength violation: {all_msgs_3}"
655-
assert (
656-
"#/value: 1500 is not less or equal to 1000" in all_msgs_3
657-
or "1500" in all_msgs_3
658-
), f"Row 3 should have maximum violation: {all_msgs_3}"
659-
assert (
660-
"#/category: X is not a valid enum value" in all_msgs_3
661-
or "enum" in all_msgs_3.lower()
662-
), f"Row 3 should have enum violation: {all_msgs_3}"
652+
# all_msgs_3 = str(results_df.loc[3, "all_validation_messages"])
653+
# assert (
654+
# "#/name: expected minLength: 3, actual: 2" in all_msgs_3
655+
# ), f"Row 3 should have minLength violation: {all_msgs_3}"
656+
# assert (
657+
# "#/value: 1500 is not less or equal to 1000" in all_msgs_3
658+
# or "1500" in all_msgs_3
659+
# ), f"Row 3 should have maximum violation: {all_msgs_3}"
660+
# assert (
661+
# "#/category: X is not a valid enum value" in all_msgs_3
662+
# or "enum" in all_msgs_3.lower()
663+
# ), f"Row 3 should have enum violation: {all_msgs_3}"
663664

664665
# AND row 4 should be invalid (value below minimum)
665666
assert (
@@ -669,9 +670,9 @@ async def test_get_validation_results_with_default_location_async(
669670
"-50.0 is not greater or equal to 0"
670671
in results_df.loc[4, "validation_error_message"]
671672
), f"Row 4 should have minimum violation, got: {results_df.loc[4, 'validation_error_message']}"
672-
assert "#/value: -50.0 is not greater or equal to 0" in str(
673-
results_df.loc[4, "all_validation_messages"]
674-
), f"Row 4 all_validation_messages incorrect: {results_df.loc[4, 'all_validation_messages']}"
673+
# assert "#/value: -50.0 is not greater or equal to 0" in str(
674+
# results_df.loc[4, "all_validation_messages"]
675+
# ), f"Row 4 all_validation_messages incorrect: {results_df.loc[4, 'all_validation_messages']}"
675676

676677
async def test_get_validation_results_with_custom_location_async(
677678
self, record_set_with_validation_fixture: RecordSet
@@ -719,9 +720,9 @@ async def test_get_validation_results_with_custom_location_async(
719720
assert pd.notna(
720721
row["validation_error_message"]
721722
), f"Row {idx} is marked invalid but has no validation_error_message"
722-
assert pd.notna(
723-
row["all_validation_messages"]
724-
), f"Row {idx} is marked invalid but has no all_validation_messages"
723+
# assert pd.notna(
724+
# row["all_validation_messages"]
725+
# ), f"Row {idx} is marked invalid but has no all_validation_messages"
725726

726727
async def test_get_validation_results_no_file_handle_emits_warning_async(
727728
self, syn_with_logger: Synapse, caplog: pytest.LogCaptureFixture

tests/integration/synapseclient/models/async/test_virtualtable_async.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44

55
import pandas as pd
66
import pytest
7+
import pytest_asyncio
78

89
from synapseclient import Synapse
910
from synapseclient.core.exceptions import SynapseHTTPError
@@ -149,12 +150,12 @@ async def test_virtual_table_lifecycle(self, project_model: Project) -> None:
149150

150151

151152
class TestVirtualTableWithDataOperations:
152-
@pytest.fixture(autouse=True, scope="function")
153+
@pytest_asyncio.fixture(autouse=True, scope="function")
153154
def init(self, syn: Synapse, schedule_for_cleanup: Callable[..., None]) -> None:
154155
self.syn = syn
155156
self.schedule_for_cleanup = schedule_for_cleanup
156157

157-
@pytest.fixture(scope="class")
158+
@pytest_asyncio.fixture(scope="class")
158159
async def base_table_with_data(
159160
self,
160161
project_model: Project,

0 commit comments

Comments
 (0)