Commit 33e1232c authored by Gmodena
Browse files

Merge branch 'T293382-add-typing' into 'multi-project-dags-repo'

Add mypy checks.

See merge request !5
parents 696e0ecb f2b27501
Pipeline #1178 canceled with stages
...@@ -26,7 +26,7 @@ ima-venv: ...@@ -26,7 +26,7 @@ ima-venv:
make -C ${ima_home} venv make -C ${ima_home} venv
test: test:
cd ${ima_home}; make test cd ${ima_home}; make mypy; make test
archive: ima-venv archive: ima-venv
tar cvz --exclude=".*" -f ${gitlab_package_archive} . tar cvz --exclude=".*" -f ${gitlab_package_archive} .
......
...@@ -26,6 +26,11 @@ venv: ${pip_requirements} ...@@ -26,6 +26,11 @@ venv: ${pip_requirements}
conda install conda-pack; \ conda install conda-pack; \
conda-pack -n ${venv} --format ${venv_archive_format}" conda-pack -n ${venv} --format ${venv_archive_format}"
mypy: ${pip_requirements_test}
${DOCKER_CMD} bash -c "export CONDA_ALWAYS_YES=true; ${CONDA_CMD}; \
pip install -r ${pip_requirements_test}; \
mypy spark"
test: ${pip_requirements_test} test: ${pip_requirements_test}
${DOCKER_CMD} bash -c "export CONDA_ALWAYS_YES=true; ${CONDA_CMD}; \ ${DOCKER_CMD} bash -c "export CONDA_ALWAYS_YES=true; ${CONDA_CMD}; \
conda install openjdk pyspark==${pyspark_version}; \ conda install openjdk pyspark==${pyspark_version}; \
......
...@@ -2,3 +2,5 @@ pytest==6.2.2 ...@@ -2,3 +2,5 @@ pytest==6.2.2
pytest-spark==0.6.0 pytest-spark==0.6.0
pytest-cov==2.10.1 pytest-cov==2.10.1
flake8==3.8.4 flake8==3.8.4
mypy==0.910
pyspark-stubs==2.4.0post10
from pyspark.sql import SparkSession from pyspark.sql import SparkSession
from pyspark.sql import functions as F from pyspark.sql import functions as F
from schema import CsvDataset from .schema import CsvDataset
import argparse import argparse
...@@ -29,7 +29,7 @@ if __name__ == "__main__": ...@@ -29,7 +29,7 @@ if __name__ == "__main__":
csv_df = ( csv_df = (
( (
spark.read.options(delimiter="\t", header=False, escape='"') spark.read.options(delimiter="\t", header="false", escape='"')
.schema(CsvDataset.schema) .schema(CsvDataset.schema)
.csv(source) .csv(source)
) )
......
...@@ -2,8 +2,8 @@ from pyspark.sql import SparkSession ...@@ -2,8 +2,8 @@ from pyspark.sql import SparkSession
from pyspark.sql import Column, DataFrame from pyspark.sql import Column, DataFrame
from pyspark.sql import functions as F from pyspark.sql import functions as F
from pyspark.sql.types import IntegerType from pyspark.sql.types import IntegerType
from schema import RawDataset from .schema import RawDataset
from instances_to_filter import InstancesToFilter from .instances_to_filter import InstancesToFilter
import argparse import argparse
import uuid import uuid
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment