Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ['3.9', '3.10', '3.11', '3.12', '3.13']
python-version: ['3.10', '3.11', '3.12', '3.13']

steps:
- name: Checkout
Expand Down
2 changes: 1 addition & 1 deletion README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ Requirements

* Python

- CPython 3.9, 3.10, 3.11, 3.12, 3.13
- CPython 3.10, 3.11, 3.12, 3.13

.. _installation:

Expand Down
2 changes: 1 addition & 1 deletion docs/introduction.rst
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ Requirements

* Python

- CPython 3.9, 3.10, 3.11, 3.12, 3.13
- CPython 3.10, 3.11, 3.12, 3.13

.. _installation:

Expand Down
4 changes: 2 additions & 2 deletions pyathena/arrow/result_set.py
Original file line number Diff line number Diff line change
Expand Up @@ -225,8 +225,8 @@ def _fetch(self) -> None:
dict_rows = rows.to_pydict()
column_names = dict_rows.keys()
processed_rows = [
tuple(self.converters[k](v) for k, v in zip(column_names, row))
for row in zip(*dict_rows.values())
tuple(self.converters[k](v) for k, v in zip(column_names, row, strict=False))
for row in zip(*dict_rows.values(), strict=False)
]
self._rows.extend(processed_rows)

Expand Down
9 changes: 7 additions & 2 deletions pyathena/pandas/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -211,10 +211,15 @@ def to_sql(
for keys, group in df.groupby(by=partitions, observed=True):
keys = keys if isinstance(keys, tuple) else (keys,)
group = group.drop(partitions, axis=1)
partition_prefix = "/".join([f"{key}={val}" for key, val in zip(partitions, keys)])
partition_prefix = "/".join(
[f"{key}={val}" for key, val in zip(partitions, keys, strict=False)]
)
partition_condition = ", ".join(
[f"`{key}` = '{val}'" for key, val in zip(partitions, keys, strict=False)]
)
partition_prefixes.append(
(
", ".join([f"`{key}` = '{val}'" for key, val in zip(partitions, keys)]),
partition_condition,
f"{location}{partition_prefix}/",
)
)
Expand Down
6 changes: 3 additions & 3 deletions pyathena/result_set.py
Original file line number Diff line number Diff line change
Expand Up @@ -396,7 +396,7 @@ def _get_rows(
tuple(
[
self._converter.convert(meta.get("Type"), row.get("VarCharValue"))
for meta, row in zip(metadata, rows[i].get("Data", []))
for meta, row in zip(metadata, rows[i].get("Data", []), strict=False)
]
)
for i in range(offset, len(rows))
Expand All @@ -420,7 +420,7 @@ def _process_rows(self, response: Dict[str, Any]) -> None:
def _is_first_row_column_labels(self, rows: List[Dict[str, Any]]) -> bool:
first_row_data = rows[0].get("Data", [])
metadata = cast(Tuple[Any, Any], self._metadata)
for meta, data in zip(metadata, first_row_data):
for meta, data in zip(metadata, first_row_data, strict=False):
if meta.get("Name") != data.get("VarCharValue"):
return False
return True
Expand Down Expand Up @@ -496,7 +496,7 @@ def _get_rows(
meta.get("Name"),
self._converter.convert(meta.get("Type"), row.get("VarCharValue")),
)
for meta, row in zip(metadata, rows[i].get("Data", []))
for meta, row in zip(metadata, rows[i].get("Data", []), strict=False)
]
)
for i in range(offset, len(rows))
Expand Down
10 changes: 4 additions & 6 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ dependencies = [
"fsspec",
"python-dateutil",
]
requires-python = ">=3.9"
requires-python = ">=3.10"
readme = "README.rst"
license = {file = "LICENSE"}
classifiers = [
Expand All @@ -21,7 +21,6 @@ classifiers = [
"Operating System :: OS Independent",
"Topic :: Database :: Front-Ends",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
Expand Down Expand Up @@ -107,7 +106,7 @@ exclude = [
".venv",
".tox",
]
target-version = "py39"
target-version = "py310"

[tool.ruff.lint]
# https://docs.astral.sh/ruff/rules/
Expand All @@ -125,7 +124,7 @@ select = [
]

[tool.mypy]
python_version = 3.9
python_version = "3.10"
follow_imports = "silent"
disallow_any_generics = true
strict_optional = true
Expand All @@ -149,11 +148,10 @@ exclude = [
legacy_tox_ini = """
[tox]
isolated_build = true
envlist = py{39,310,311,312,313}
envlist = py{310,311,312,313}

[gh-actions]
python =
3.9: py39
3.10: py310
3.11: py311
3.12: py312
Expand Down
4 changes: 2 additions & 2 deletions tests/pyathena/arrow/test_async_cursor.py
Original file line number Diff line number Diff line change
Expand Up @@ -226,7 +226,7 @@ def test_as_arrow(self, async_arrow_cursor):
table = future.result().as_arrow()
assert table.shape[0] == 1
assert table.shape[1] == 1
assert list(zip(*table.to_pydict().values())) == [(1,)]
assert list(zip(*table.to_pydict().values(), strict=False)) == [(1,)]

@pytest.mark.parametrize(
"async_arrow_cursor",
Expand All @@ -238,7 +238,7 @@ def test_many_as_arrow(self, async_arrow_cursor):
table = future.result().as_arrow()
assert table.shape[0] == 10000
assert table.shape[1] == 1
assert list(zip(*table.to_pydict().values())) == [(i,) for i in range(10000)]
assert list(zip(*table.to_pydict().values(), strict=False)) == [(i,) for i in range(10000)]

def test_cancel(self, async_arrow_cursor):
query_id, future = async_arrow_cursor.execute(
Expand Down
8 changes: 4 additions & 4 deletions tests/pyathena/arrow/test_cursor.py
Original file line number Diff line number Diff line change
Expand Up @@ -259,7 +259,7 @@ def test_as_arrow(self, arrow_cursor):
table = arrow_cursor.execute("SELECT * FROM one_row").as_arrow()
assert table.shape[0] == 1
assert table.shape[1] == 1
assert list(zip(*table.to_pydict().values())) == [(1,)]
assert list(zip(*table.to_pydict().values(), strict=False)) == [(1,)]

@pytest.mark.parametrize(
"arrow_cursor",
Expand All @@ -270,7 +270,7 @@ def test_many_as_arrow(self, arrow_cursor):
table = arrow_cursor.execute("SELECT * FROM many_rows").as_arrow()
assert table.shape[0] == 10000
assert table.shape[1] == 1
assert list(zip(*table.to_pydict().values())) == [(i,) for i in range(10000)]
assert list(zip(*table.to_pydict().values(), strict=False)) == [(i,) for i in range(10000)]

def test_complex_as_arrow(self, arrow_cursor):
table = arrow_cursor.execute(
Expand Down Expand Up @@ -323,7 +323,7 @@ def test_complex_as_arrow(self, arrow_cursor):
pa.field("col_decimal", pa.string()),
]
)
assert list(zip(*table.to_pydict().values())) == [
assert list(zip(*table.to_pydict().values(), strict=False)) == [
(
True,
127,
Expand Down Expand Up @@ -406,7 +406,7 @@ def test_complex_unload_as_arrow(self, arrow_cursor):
pa.field("col_decimal", pa.decimal128(10, 1)),
]
)
assert list(zip(*table.to_pydict().values())) == [
assert list(zip(*table.to_pydict().values(), strict=False)) == [
(
True,
127,
Expand Down
2 changes: 1 addition & 1 deletion tests/pyathena/pandas/test_cursor.py
Original file line number Diff line number Diff line change
Expand Up @@ -1582,5 +1582,5 @@ def test_pandas_cursor_iter_chunks_consistency(self, pandas_cursor):
assert len(chunks_via_method) == len(chunks_via_direct)

# Each corresponding chunk should be identical
for chunk1, chunk2 in zip(chunks_via_method, chunks_via_direct):
for chunk1, chunk2 in zip(chunks_via_method, chunks_via_direct, strict=False):
pd.testing.assert_frame_equal(chunk1, chunk2)
Loading