From db01442d9b68f9d062f20aec2180ce76e66dcd4b Mon Sep 17 00:00:00 2001
From: Ed Chalstrey
Date: Fri, 18 Mar 2022 16:18:44 +0000
Subject: [PATCH] flake8

Fix flake8 warnings in the ETL scripts: exclude etl/__init__.py from the
CI lint step, drop an unused import, and break up over-long lines.
---
 .github/workflows/etl.yml |  2 +-
 etl/filter_mastermap.py   |  8 +++++---
 etl/get_test_polygons.py  | 19 +++++++++++++------
 3 files changed, 19 insertions(+), 10 deletions(-)

diff --git a/.github/workflows/etl.yml b/.github/workflows/etl.yml
index 9b8208d4..0d56a433 100644
--- a/.github/workflows/etl.yml
+++ b/.github/workflows/etl.yml
@@ -16,7 +16,7 @@ jobs:
         python -m pip install -r requirements.txt
     - name: Run Flake8
      run: |
-        ls etl/*py | grep -v 'join_building_data' | xargs flake8
+        ls etl/*py | grep -v 'join_building_data' | xargs flake8 --exclude etl/__init__.py
    - name: Run tests
      run: |
        python -m pytest
\ No newline at end of file
diff --git a/etl/filter_mastermap.py b/etl/filter_mastermap.py
index 2167a53c..3315a6ca 100644
--- a/etl/filter_mastermap.py
+++ b/etl/filter_mastermap.py
@@ -4,13 +4,13 @@
 """
 import csv
 import glob
-import json
 import os
 import sys
 
 csv.field_size_limit(sys.maxsize)
 
+
 def main(mastermap_path):
     mm_paths = sorted(glob.glob(os.path.join(mastermap_path, "*.gml.csv")))
     for mm_path in mm_paths:
         filter_mastermap(mm_path)
@@ -19,7 +19,8 @@ def main(mastermap_path):
 
 
 def filter_mastermap(mm_path):
-    output_path = "{}.filtered.csv".format(str(mm_path).replace(".gml.csv", ""))
+    output_path = "{}.filtered.csv".format(
+        str(mm_path).replace(".gml.csv", ""))
     output_fieldnames = ('WKT', 'fid', 'descriptiveGroup')
     # Open the input csv with all polygons, buildings and others
     with open(mm_path, 'r') as fh:
@@ -32,7 +33,8 @@ def filter_mastermap(mm_path):
             try:
                 if 'Building' in line['descriptiveGroup']:
                     w.writerow(line)
-            except TypeError:  # when descriptiveGroup is missing, ignore this Polygon
+            except TypeError:
+                # when descriptiveGroup is missing, ignore this Polygon
                 pass
 
 
diff --git a/etl/get_test_polygons.py b/etl/get_test_polygons.py
index 6b1b34e3..388b9872 100644
--- a/etl/get_test_polygons.py
+++ b/etl/get_test_polygons.py
@@ -25,11 +25,12 @@ gdf = osmnx.footprints_from_point(point=point, dist=dist)
 
 # preview image
 gdf_proj = osmnx.projection.project_gdf(gdf, to_crs={'init': 'epsg:3857'})
-gdf_proj = gdf_proj[gdf_proj.geometry.apply(lambda g: g.geom_type != 'MultiPolygon')]
+gdf_proj = gdf_proj[gdf_proj.geometry.apply(lambda g: g.geom_type != 'MultiPolygon')]  # noqa
 
-fig, ax = osmnx.plot_footprints(gdf_proj, bgcolor='#333333', color='w', figsize=(4,4),
-                                save=True, show=False, close=True,
-                                filename='test_buildings_preview', dpi=600)
+fig, ax = osmnx.plot_footprints(gdf_proj, bgcolor='#333333',
+                                color='w', figsize=(4, 4),
+                                save=True, show=False, close=True,
+                                filename='test_buildings_preview', dpi=600)
 
 # save
 test_dir = os.path.dirname(__file__)
@@ -50,7 +51,13 @@ gdf_to_save.rename(
 # convert to CSV
 test_data_csv = str(os.path.join(test_dir, 'test_buildings.3857.csv'))
 subprocess.run(["rm", test_data_csv])
-subprocess.run(["ogr2ogr", "-f", "CSV", test_data_csv, test_data_geojson, "-lco", "GEOMETRY=AS_WKT"])
+subprocess.run(
+    ["ogr2ogr", "-f", "CSV", test_data_csv,
+     test_data_geojson, "-lco", "GEOMETRY=AS_WKT"]
+)
 
 # add SRID for ease of loading to PostgreSQL
-subprocess.run(["sed", "-i", "s/^\"POLYGON/\"SRID=3857;POLYGON/", test_data_csv])
+subprocess.run(
+    ["sed", "-i", "s/^\"POLYGON/\"SRID=3857;POLYGON/",
+     test_data_csv]
+)