Finish EnergyPlus simulation and results export; minor code refactoring

Koa Wells 2023-04-14 16:23:26 -04:00
parent 4519d012a2
commit 175041e2df
5 changed files with 250 additions and 27 deletions


@@ -2,8 +2,12 @@ import json
import random
import datetime
from pathlib import Path
import pandas as pd
import platform
import os
from hub.imports.geometry_factory import GeometryFactory
from hub.imports.weather_factory import WeatherFactory
from hub.imports.construction_factory import ConstructionFactory
from hub.imports.usage_factory import UsageFactory
from hub.imports.results_factory import ResultFactory
@@ -13,6 +17,7 @@ from hub.helpers.data.montreal_function_to_hub_function import MontrealFunctionT
from sra import Sra
from meb import Meb
from meb_results import Results as MEBResults
class EnergyValidation:
def __init__(self):
@@ -23,6 +28,12 @@ class EnergyValidation:
self.climate_file = Path(f'{self.storage_path}/{self.climate_file_name}.cli').resolve()
self.meb_folder = Path('./results/meb').resolve()
self.ep_folder = Path('./results/ep').resolve()
self.result_file = Path('./results/energy_validation_results.xlsx').resolve()
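# note: meb_results.py (below) writes the metadata file via open() with no explicit encoding,
# so on Windows it uses the platform default code page; the matching encoding is used here
# when the MEB result files are read back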
if platform.system() == 'Windows':
self.encoding = 'windows-1252'
else:
self.encoding = 'utf-8'
def _sort_buildings(self, buildings_to_simulate):
sorted_buildings = {}
@@ -33,9 +44,129 @@ class EnergyValidation:
sorted_buildings[code_utili].append(building)
return sorted_buildings
def run(self, building_set, building_quantities):
def _save_meb_results(self, demand, metadata, building_area):
results = []
building_name = metadata.iloc[0, 0].split(': ')[1]
# start by formatting the meb results
# convert from Wh to kWh/m^2
demand *= 0.001/building_area
# replace indexes with month/day/year format
months = {'month':
['1/1/2023',
'2/1/2023',
'3/1/2023',
'4/1/2023',
'5/1/2023',
'6/1/2023',
'7/1/2023',
'8/1/2023',
'9/1/2023',
'10/1/2023',
'11/1/2023',
'12/1/2023'
]
}
demand.iloc[:, 0] = pd.DataFrame(months)
# insert building_name to first column
demand.insert(0, 'building_name', building_name)
# swap lighting and appliances columns
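# (the tuple on the right is evaluated before either assignment, so the two columns exchange
# their values while the column labels and their order in the sheet stay unchanged)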
demand[f'{building_name} lighting electrical demand Wh'], \
demand[f'{building_name} appliances electrical demand Wh'] = \
demand[f'{building_name} appliances electrical demand Wh'], \
demand[f'{building_name} lighting electrical demand Wh']
# insert simulation source to last column
demand['source'] = 'meb'
# format building metadata
'''
metadata format:
building_id
number_of_storeys
m2_per_storey
total_m2
total_m3
year_of_construction
building_usage
TODO: number_of_adjacent_walls
'''
formatted_metadata = pd.DataFrame({
'metadata':
[
metadata.iloc[0, 0].split(': ')[1],
metadata.iloc[4, 0].split(': ')[1],
metadata.iloc[3, 0].split(': ')[1],
building_area,
metadata.iloc[6, 0].split(': ')[1],
metadata.iloc[1, 0].split(': ')[1],
metadata.iloc[2, 0].split(': ')[1]
]}).transpose()
# append the reformatted demand and metadata to the results spreadsheet
with pd.ExcelWriter(self.result_file, engine='openpyxl', if_sheet_exists='overlay', mode='a') as writer:
demand.to_excel(
writer,
startrow=writer.sheets['Simulation data'].max_row,
sheet_name='Simulation data',
index=False,
header=False,
)
formatted_metadata.to_excel(
writer,
startrow=writer.sheets['Metadata'].max_row,
sheet_name='Metadata',
index=False,
header=False,
)
def _save_ep_results(self, demand, building_area, building_name):
demand.drop('Date/Time', axis=1, inplace=True)
# convert from J to kWh/m^2
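# 1 kWh = 3.6e6 J, so the 2.77778e-7 factor (= 1 / 3.6e6) converts J to kWh before dividing by floor area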
demand *= 2.77778e-7/building_area
# replace indexes with month/day/year format
months = [
'1/1/2023',
'2/1/2023',
'3/1/2023',
'4/1/2023',
'5/1/2023',
'6/1/2023',
'7/1/2023',
'8/1/2023',
'9/1/2023',
'10/1/2023',
'11/1/2023',
'12/1/2023'
]
demand.insert(0, 'month', months)
# insert building_name to first column
demand.insert(0, 'building_name', building_name)
# TODO: add water usage once working from ep
demand['water_usage'] = 'NA'
# add simulation source as ep
demand['source'] = 'ep'
# append the reformatted EnergyPlus demand data to the results spreadsheet
with pd.ExcelWriter(self.result_file, engine='openpyxl', if_sheet_exists='overlay', mode='a') as writer:
demand.to_excel(
writer,
startrow=writer.sheets['Simulation data'].max_row,
sheet_name='Simulation data',
index=False,
header=False,
)
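# Both save helpers open the workbook with mode='a' and if_sheet_exists='overlay', which assumes
# results/energy_validation_results.xlsx already exists and contains 'Simulation data' and
# 'Metadata' sheets (pandas raises FileNotFoundError otherwise). A minimal seeding sketch; the
# header names below are illustrative assumptions, not taken from this repository:
#
#   import pandas as pd
#   seed = pd.DataFrame(columns=['building_name', 'month', 'heating', 'cooling', 'lighting',
#                                'appliances', 'dhw', 'water_usage', 'source'])
#   with pd.ExcelWriter('results/energy_validation_results.xlsx', engine='openpyxl') as writer:
#       seed.to_excel(writer, sheet_name='Simulation data', index=False)
#       pd.DataFrame(columns=['metadata']).to_excel(writer, sheet_name='Metadata', index=False)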
def run(self, building_set, building_quantities, cleanup=True):
sorted_buildings = self._sort_buildings(building_set)
building_to_simulate = []
min_m2_satisfied = False
for code_utili in building_quantities:
@@ -43,6 +174,9 @@ class EnergyValidation:
print(f'CODE_UTILI:{code_utili} is not found in the provided dataset.')
else:
for building in range(building_quantities[code_utili]):
building_to_simulate = []
min_m2_satisfied = False
# only select buildings with an area of 500 m^2 or more
while not min_m2_satisfied:
building_to_simulate.append(sorted_buildings[code_utili][random.randrange(
@@ -63,58 +197,71 @@ class EnergyValidation:
geojson_file.write(json.dumps(geojson, indent=2))
geojson_file.close()
# run enrichment factories
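# GeometryFactory builds the city model from the GeoJSON footprints, heights and CODE_UTILI
# functions; the weather ('epw'), construction ('nrcan') and usage ('nrcan') factories then
# enrich it with climate data, construction archetypes and usage schedules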
city = GeometryFactory('geojson',
path=f'tmp/{building_id}_energy_validation.geojson',
height_field='building_height',
year_of_construction_field='ANNEE_CONS',
function_field='CODE_UTILI',
function_to_hub=MontrealFunctionToHubFunction().dictionary).city
WeatherFactory('epw', city, file_name='./CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw').enrich()
ConstructionFactory('nrcan', city).enrich()
UsageFactory('nrcan', city).enrich()
if city.climate_reference_city is None:
city.name = f'Concordia current status {building_id}'
city.name = f'{building_id}_energy_validation'
city.climate_reference_city = city.location
self.climate_file_name = city.location
city.climate_file = self.climate_file
city.name = f'{building_id}_energy_validation'
# starting sra
print(f'{building_id} starting sra')
start = datetime.datetime.now()
ExportsFactory('sra', city, self.tmp_folder, weather_file=self.weather_file, weather_format='epw').export()
ExportsFactory('sra', city, self.tmp_folder,
weather_file=self.weather_file, weather_format='epw').export()
sra_file = (self.tmp_folder / f'{city.name}_sra.xml').resolve()
sra_start = datetime.datetime.now()
Sra(sra_file, self.tmp_folder).run()
sra_end = datetime.datetime.now() - sra_start
ResultFactory('sra', city, self.tmp_folder).enrich()
sra_time = datetime.datetime.now() - start
print(f"{building_id} SRA time: {sra_time}\n")
# run meb
print(f'{building_id} starting meb')
start = datetime.datetime.now()
for building in city.buildings:
building.attic_heated = 0
building.basement_heated = 1
EnergyBuildingsExportsFactory('insel_monthly_energy_balance', city, self.meb_folder).export()
Meb(self.meb_folder).run()
EnergyBuildingsExportsFactory('insel_monthly_energy_balance', city, self.tmp_folder).export()
meb_start = datetime.datetime.now()
Meb(self.tmp_folder).run()
meb_end = datetime.datetime.now() - meb_start
ResultFactory('insel_meb', city, self.tmp_folder).enrich()
results = MEBResults(city, Path('./results/meb/').resolve())
results.print()
meb_time = datetime.datetime.now() - start
print(f"{building_id} meb time: {meb_time}\n")
# save meb results to energy_validation_results
total_m2 = city.buildings[0].internal_zones[0].thermal_zones[0].total_floor_area
meb_results = pd.read_csv(Path(f'{self.meb_folder}/demand.csv').resolve(),
encoding=self.encoding)
meb_metadata = pd.read_csv(Path(f'{self.meb_folder}/metadata.csv').resolve(),
encoding=self.encoding)
self._save_meb_results(meb_results, meb_metadata, total_m2)
# run energyplus
print(f'{building_id} starting EnergyPlus')
EnergyBuildingsExportsFactory('idf', city, self.ep_folder).export()
idf_file = (self.tmp_folder / f'{city.name}.idf').resolve()
#EP(self.ep_folder).run()
idf_file = EnergyBuildingsExportsFactory('idf', city, self.ep_folder).export_debug()
ep_start = datetime.datetime.now()
idf_file.run()
ep_end = datetime.datetime.now() - ep_start
building_to_simulate.clear()
# save ep results to energy_validation_results
ep_results = pd.read_csv(Path(f'{self.ep_folder}/{building_id}_energy_validation_mtr.csv').resolve(),
encoding=self.encoding)
self._save_ep_results(ep_results, total_m2, city.buildings[0].name)
data_file = open('data/VMTrial_cleaned.geojson', 'r')
city = json.load(data_file)
buildings = city['features']
print(f"{building_id} sra time: {sra_end}")
print(f"{building_id} meb time: {meb_end}")
print(f"{building_id} ep time: {ep_end}")
test_batch = {
'1000': 50,
'4413': 10,
'1921': 5
}
test = EnergyValidation()
test.run(building_set=buildings, building_quantities=test_batch)
if cleanup is True:
[os.remove(os.path.join(self.tmp_folder, file)) for file in os.listdir(self.tmp_folder)]
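# the comprehension above is evaluated only for its side effect: every file left in tmp/
# (the per-building geojson, SRA xml and INSEL inputs) is deleted when cleanup is requested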

ep.py

@@ -11,6 +11,7 @@ class EP:
"""
self._file_path = file_path
self._output_file_path = output_file_path
if platform.system() == 'Linux':
self._executable = 'energyplus'
elif platform.system() == 'Windows':

main.py Normal file

@@ -0,0 +1,19 @@
import json
from pathlib import Path
from energy_validation import EnergyValidation
# load the dataset you want to simulate
data_file = open(Path('./data/VMTrial_cleaned.geojson').resolve(), 'r')
city = json.load(data_file)
buildings = city['features']
# input the usage type and quantities that you would like to simulate
test_batch = {
'1000': 10,
'4413': 10,
'1921': 5
}
validate_meb_ep = EnergyValidation()
# if cleanup is True, removes all files in tmp directory
validate_meb_ep.run(building_set=buildings, building_quantities=test_batch, cleanup=True)
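# note: the relative data/, tmp/ and results/ paths used here and inside EnergyValidation assume
# the script is launched from the repository root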

meb_results.py Normal file

@@ -0,0 +1,56 @@
from pathlib import Path
import pandas as pd
import hub.helpers.constants as cte
class Results:
def __init__(self, city, path):
self._city = city
self._path = path
def print(self):
print_results = None
file = 'city name: ' + self._city.name + '\n'
for building in self._city.buildings:
if cte.MONTH in building.heating.keys():
heating_results = building.heating[cte.MONTH].rename(columns={cte.INSEL_MEB: f'{building.name} heating Wh'})
cooling_results = building.cooling[cte.MONTH].rename(columns={cte.INSEL_MEB: f'{building.name} cooling Wh'})
lighting_results = building.lighting_electrical_demand[cte.MONTH]\
.rename(columns={cte.INSEL_MEB: f'{building.name} lighting electrical demand Wh'})
appliances_results = building.appliances_electrical_demand[cte.MONTH]\
.rename(columns={cte.INSEL_MEB: f'{building.name} appliances electrical demand Wh'})
dhw_results = building.domestic_hot_water_heat_demand[cte.MONTH]\
.rename(columns={cte.INSEL_MEB: f'{building.name} domestic hot water demand Wh'})
else:
array = [None] * 12
heating_results = pd.DataFrame(array, columns=[f'{building.name} heating Wh'])
cooling_results = pd.DataFrame(array, columns=[f'{building.name} cooling Wh'])
lighting_results = pd.DataFrame(array, columns=[f'{building.name} lighting electrical demand Wh'])
appliances_results = pd.DataFrame(array, columns=[f'{building.name} appliances electrical demand Wh'])
dhw_results = pd.DataFrame(array, columns=[f'{building.name} domestic hot water demand Wh'])
if print_results is None:
print_results = heating_results
else:
print_results = pd.concat([print_results, heating_results], axis='columns')
print_results = pd.concat([print_results,
cooling_results,
lighting_results,
appliances_results,
dhw_results], axis='columns')
file += '\n'
file += f'name: {building.name}\n'
file += f'year of construction: {building.year_of_construction}\n'
file += f'function: {building.function}\n'
file += f'floor area: {building.floor_area}\n'
if building.average_storey_height is not None and building.eave_height is not None:
file += f'storeys: {int(building.eave_height / building.average_storey_height)}\n'
else:
file += f'storeys: n/a\n'
file += f'heated_volume: {0.85 * building.volume}\n'
file += f'volume: {building.volume}\n'
full_path_results = Path(self._path / 'demand.csv').resolve()
print_results.to_csv(full_path_results, na_rep='null')
full_path_metadata = Path(self._path / 'metadata.csv').resolve()
with open(full_path_metadata, 'w') as metadata_file:
metadata_file.write(file)
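# Minimal usage, mirroring the calls in energy_validation.py above:
#   results = Results(city, Path('./results/meb/').resolve())
#   results.print()  # writes demand.csv and metadata.csv into the given results folder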