HostingCapacity: New implementation of hosting capacity map
jenny-nyx committed Jan 23, 2025
1 parent 0446032 commit c18ff3f
Showing 2 changed files with 162 additions and 70 deletions.
29 changes: 16 additions & 13 deletions omf/models/hostingCapacity.html
@@ -126,7 +126,7 @@
<label class="tooltip">Maximum kW Tested
<span class="classic">The maximum threshhold of KW added to determine max hosting capacity</span>
</label>
<input type="text" id="modelBasedHCMaxTestkw" name="modelBasedHCMaxTestkw" value="{{allInputDataDict.modelBasedHCMaxTestkw}}" pattern="^[0-9]*$"/>
<input type="text" id="model_basedHCMaxTestkw" name="model_basedHCMaxTestkw" value="{{allInputDataDict.model_basedHCMaxTestkw}}" pattern="^[0-9]*$"/>
</div>
<div class="wideInput">
<p class="inputSectionHeader">Downline Load Hosting Capacity</p>
@@ -149,13 +149,16 @@
{% if modelStatus == 'finished' %}
<style>td, th {padding:7px 0 5px 20px;text-align: left;font-size:0.8em; border: 1px solid #cccccc;} </style>
<div id="output">
<!--
We want the map to be displayed at the top, but it will only display if model-based is there, since it's created there
-->
<p class="reportTitle" style="page-break-before:always">Hosting Capacity Map</p>
<div id="modelBasedHCMap" class="tightContent"></div>
<div id="hostingCapacityMap" class="tightContent"></div>
<script>
var mapContent = allOutputData["modelBasedHCMap"]
var mapContent = allOutputData["hostingCapacityMap"]
var iframe = document.createElement('iframe');
iframe.style = 'width:1000px; height:800px; border-radius:8px;'
document.getElementById('modelBasedHCMap').appendChild(iframe);
document.getElementById('hostingCapacityMap').appendChild(iframe);
iframe.contentWindow.document.open();
iframe.contentWindow.document.write(mapContent);
iframe.contentWindow.document.close();
@@ -199,25 +202,25 @@
{% endif %}
{% if allInputDataDict['runModelBasedAlgorithm'] == 'on' %}
<p class="reportTitle">Model-Based Hosting Capacity Runtime ( H:M:S:MS )</p>
<div id="modelBasedRunTime" class="tightContent">
<p style="padding: 4px; align-items: center;"> {{ allOutputDataDict['modelBasedRuntime'] }} </p>
<div id="model_basedRunTime" class="tightContent">
<p style="padding: 4px; align-items: center;"> {{ allOutputDataDict['model_basedRuntime'] }} </p>
</div>
<p class="reportTitle">Model-Based Hosting Capacity By Bus</p>
<div id="modelBasedHCGraph" class="tightContent">
<div id="modelBasedHCGraph" style="width:1000px"></div>
<div id="model_basedHCGraph" class="tightContent">
<div id="model_basedHCGraph" style="width:1000px"></div>
<script type="text/javascript">
Plotly.newPlot("modelBasedHCGraph", JSON.parse( allOutputData["modelBasedGraphData"]) )
Plotly.newPlot("model_basedHCGraph", JSON.parse( allOutputData["model_basedGraphData"]) )
</script>
</div>
<p class="reportTitle">Model-Based Hosting Capacity Full Data Table</p>
<div id="modelBasedHostingCapacityTable" class="tightContent" style="height:300px; overflow-y: scroll;">
<div id="model_basedHostingCapacityTable" class="tightContent" style="height:300px; overflow-y: scroll;">
<table style="width:100%;">
<tr>
{% for header in allOutputDataDict["modelBasedHCTableHeadings"] %}
{% for header in allOutputDataDict["model_basedHCTableHeadings"] %}
<th>{{ header }}</th>
{% endfor %}
</tr>
{% for values in allOutputDataDict["modelBasedHCTableValues"] %}
{% for values in allOutputDataDict["model_basedHCTableValues"] %}
<tr>
{% for val in values %}
<td>{{ val }}</td>
@@ -229,7 +232,7 @@
{% endif %}
{% if allInputDataDict['runDownlineAlgorithm'] == 'on' %}
<p class="reportTitle">Downline Load Hosting Capacity Runtime ( H:M:S:MS )</p>
<div id="modelBasedRunTime" class="tightContent">
<div id="model_basedRunTime" class="tightContent">
<p style="padding: 4px; align-items: center;";> {{ allOutputDataDict['downline_runtime'] }} </p>
</div>
<p class="reportTitle">Downline Load Hosting Capacity Full Data Table</p>
203 changes: 146 additions & 57 deletions omf/models/hostingCapacity.py
@@ -1,4 +1,4 @@
''' Calculate hosting capacity using modelBased and/or AMI-based methods. '''
''' Calculate hosting capacity using model_based and/or AMI-based methods. '''
import shutil
import plotly as py
import plotly.express as px
@@ -16,20 +16,136 @@
from omf.solvers import mohca_cl

# Model metadata:
tooltip = "Calculate hosting capacity using modelBased and/or AMI-based methods."
tooltip = "Calculate hosting capacity using model_based and/or AMI-based methods."
modelName, template = __neoMetaModel__.metadata(__file__)
hidden = False

def convert_seconds_to_hms_ms( seconds ):
milliseconds = seconds * 1000

# Calculate hours, minutes, seconds, and milliseconds
hours, remainder = divmod(milliseconds, 3600000)
minutes, remainder = divmod(remainder, 60000)
seconds, milliseconds = divmod(remainder, 1000)

return "{:02d}:{:02d}:{:02d}.{:03d}".format(int(hours), int(minutes), int(seconds), int(milliseconds))

def hosting_capacity_map( modelDir, inputDict, outData ):
feederName = [x for x in os.listdir(modelDir) if x.endswith('.omd')][0]
path_to_omd = Path(modelDir, feederName)
starting_omd = json.load(open(path_to_omd))

if inputDict['runAmiAlgorithm'] == 'on':
model_free_data = Path(modelDir, 'output_mohca.csv')
model_free_data_df = pd.read_csv(model_free_data)
with open(path_to_omd) as f:
omd = json.load(f)
buses_from_omd_tree = []
for k, v in omd['tree'].items():
if v['object'] == 'bus':
buses_from_omd_tree.append(v['name'])
model_free_buses = model_free_data_df['busname'].unique()
# Find the buses from the mohca results that are in the circuit; these are the ones we will color
model_free_buses_in_omd_file = list(set(model_free_buses).intersection(buses_from_omd_tree))
if len(model_free_buses_in_omd_file) != 0:
# Great, there are some buses in the mohca file that are in the omd file. That means we can have some color!
# Note: If buses are in the omd and not in the Model-Free results, nothing is done.
# If buses are in the Model-Free results and not in the omd, nothing is done.
# If this were to change:
# - The buses that are in the model-free results but not in the omd file would have to be dropped, e.g.:
# buses_in_mohca_not_in_omd = set(model_free_buses) - set(buses_from_omd_tree)
attachment_keys = {
"coloringFiles": {
"output_mohca.csv": {
"csv": "<content>",
"colorOnLoadColumnIndex": "1"
}
}
}
model_free_color_data = Path(modelDir, 'output_mohca.csv').read_text()
# adding the mohca color output to the attachment
attachment_keys['coloringFiles']['output_mohca.csv']['csv'] = model_free_color_data
new_omd_path = Path(modelDir, 'color.omd')
starting_omd['attachments'] = attachment_keys
with open(new_omd_path, 'w+') as out_file:
json.dump(starting_omd, out_file, indent=4)
omf.geo.map_omd(new_omd_path, modelDir, open_browser=False)
outData['hostingCapacityMap'] = open(Path(modelDir, "geoJson_offline.html"), 'r').read()

if inputDict['runModelBasedAlgorithm'] == 'on':
# This code is copied from what previously existed in run_model_based_algorithm,
# except now we check whether color.omd has already been created.
# If it has, we just need to add these results to the existing coloring files.
# If it hasn't, we need to create the map from scratch, which also means the model-free algorithm did not run.
colorPath = Path(modelDir, 'color.omd')
if colorPath.exists():
# Just add model-based coloring
model_based_color = {
"color_by_model_based.csv": {
"csv": "<content>",
"colorOnLoadColumnIndex": "0"
}
}
model_based_color_data = Path(modelDir, 'color_by_model_based.csv').read_text()
original_file_data = json.load( open(colorPath) )
original_file_data['attachments']['coloringFiles'].update(model_based_color)
original_file_data['attachments']['coloringFiles']['color_by_model_based.csv']['csv'] = model_based_color_data
with open(colorPath, 'w+') as out_file:
json.dump(original_file_data, out_file, indent=4)
omf.geo.map_omd(colorPath, modelDir, open_browser=False )
outData['hostingCapacityMap'] = open(Path(modelDir, "geoJson_offline.html"), 'r' ).read()
else:
attachment_keys = {
"coloringFiles": {
"color_by_model_based.csv": {
"csv": "<content>",
"colorOnLoadColumnIndex": "1"
},
}
}

model_based_color_data = Path(modelDir, 'color_by_model_based.csv').read_text()
attachment_keys['coloringFiles']['color_by_model_based.csv']['csv'] = model_based_color_data
new_omd_path = Path(modelDir, 'color.omd')
starting_omd['attachments'] = attachment_keys
with open(new_omd_path, 'w+') as out_file:
json.dump(starting_omd, out_file, indent=4)
omf.geo.map_omd(new_omd_path, modelDir, open_browser=False )
outData['hostingCapacityMap'] = open(Path(modelDir, "geoJson_offline.html"), 'r' ).read()
if inputDict['runDownlineAlgorithm'] == 'on':
# Same as the branch above: check whether color.omd has already been created. If it has, we just add the coloring; if it hasn't, we need to create the map setup as well.
colorPath = Path(modelDir, 'color.omd')
if colorPath.exists():
downline_color_data = Path(modelDir, 'output_downline_load.csv').read_text()
downline_color = {
"output_downline_load.csv": {
"csv": "<content>",
"colorOnLoadColumnIndex": "0"
}
}
original_file_data = json.load( open(colorPath) )
original_file_data['attachments']['coloringFiles'].update(downline_color)
original_file_data['attachments']['coloringFiles']['output_downline_load.csv']['csv'] = downline_color_data
with open(colorPath, 'w+') as out_file:
json.dump(original_file_data, out_file, indent=4)
omf.geo.map_omd(colorPath, modelDir, open_browser=False )
outData['hostingCapacityMap'] = open(Path(modelDir, "geoJson_offline.html"), 'r' ).read()
else:
attachment_keys = {
"coloringFiles": {
"output_downline_load.csv": {
"csv": "<content>",
"colorOnLoadColumnIndex": "1"
},
}
}
downline_load_color_data = Path(modelDir, 'output_downline_load.csv').read_text()
attachment_keys['coloringFiles']['output_downline_load.csv']['csv'] = downline_load_color_data
new_omd_path = Path(modelDir, 'color.omd')
starting_omd['attachments'] = attachment_keys
with open(new_omd_path, 'w+') as out_file:
json.dump(starting_omd, out_file, indent=4)
omf.geo.map_omd(new_omd_path, modelDir, open_browser=False )
outData['hostingCapacityMap'] = open(Path(modelDir, "geoJson_offline.html"), 'r' ).read()
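The three branches above share the same "create color.omd on first use, otherwise append another coloring file" pattern. A minimal sketch of how that could be factored into one helper, relying on the module's existing imports (Path, json, omf); the name add_coloring_file and its signature are hypothetical, not part of this commit:

def add_coloring_file(modelDir, starting_omd, csv_name, column_index='1'):
	# Hypothetical helper: attach csv_name as a coloring file on color.omd,
	# creating the map omd on the first call and updating it on later calls.
	color_path = Path(modelDir, 'color.omd')
	csv_text = Path(modelDir, csv_name).read_text()
	if color_path.exists():
		omd = json.load(open(color_path))
	else:
		omd = starting_omd
		omd['attachments'] = {'coloringFiles': {}}
	omd['attachments']['coloringFiles'][csv_name] = {'csv': csv_text, 'colorOnLoadColumnIndex': column_index}
	with open(color_path, 'w+') as out_file:
		json.dump(omd, out_file, indent=4)
	omf.geo.map_omd(color_path, modelDir, open_browser=False)
	return open(Path(modelDir, 'geoJson_offline.html'), 'r').read()

Each branch would then reduce to a single call, e.g. outData['hostingCapacityMap'] = add_coloring_file(modelDir, starting_omd, 'output_mohca.csv').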

def run_downline_load_algorithm( modelDir, inputDict, outData ):
feederName = [x for x in os.listdir(modelDir) if x.endswith('.omd')][0]
path_to_omd = Path(modelDir, feederName)
@@ -73,21 +189,7 @@ def run_downline_load_algorithm( modelDir, inputDict, outData ):
for i in indexes:
sorted_downlineDF = sorted_downlineDF.drop(i)
sorted_downlineDF.to_csv(Path(modelDir, 'output_downline_load.csv'), index=False)
downline_color_data = Path(modelDir, 'output_downline_load.csv').read_text()
downline_color = {
"downline_load.csv": {
"csv": "<content>",
"colorOnLoadColumnIndex": "0"
}
}
original_file = Path(modelDir, 'color_test.omd') # This should have already been made
original_file_data = json.load( open(original_file) )
original_file_data['attachments']['coloringFiles'].update(downline_color)
original_file_data['attachments']['coloringFiles']['downline_load.csv']['csv'] = downline_color_data
with open(original_file, 'w+') as out_file:
json.dump(original_file_data, out_file, indent=4)
omf.geo.map_omd(original_file, modelDir, open_browser=False )
outData['modelBasedHCMap'] = open(Path(modelDir, "geoJson_offline.html"), 'r' ).read()

outData['downline_tableHeadings'] = downline_output.columns.values.tolist()
outData['downline_tableValues'] = (list(sorted_downlineDF.itertuples(index=False, name=None)))
outData['downline_runtime'] = convert_seconds_to_hms_ms( downline_end_time - downline_start_time )
@@ -97,7 +199,7 @@ def run_ami_algorithm( modelDir, inputDict, outData ):
# mohca data-driven hosting capacity
inputPath = Path(modelDir, inputDict['AMIDataFileName'])
inputAsString = inputPath.read_text()
outputPath = Path(modelDir, 'output_MoCHa.csv')
outputPath = Path(modelDir, 'output_mohca.csv')
try:
csvValidateAndLoad(inputAsString, modelDir=modelDir, header=0, nrows=None, ncols=None, dtypes=[], return_type='df', ignore_nans=True, save_file=None, ignore_errors=False )
except:
@@ -116,7 +218,6 @@ def run_ami_algorithm( modelDir, inputDict, outData ):
errorMessage = "Algorithm name error"
raise Exception(errorMessage)
AMI_end_time = time.time()

AMI_results = pd.read_csv( outputPath, index_col=False)
AMI_results.rename(columns={'kw_hostable': 'voltage_cap_kW'}, inplace=True)
histogramFigure = px.histogram( AMI_results, x='voltage_cap_kW', template="simple_white", color_discrete_sequence=["MediumPurple"] )
@@ -135,46 +236,30 @@ def run_ami_algorithm( modelDir, inputDict, outData ):
outData['AMI_tableValues'] = ( list(AMI_results_sorted.itertuples(index=False, name=None)) )
outData['AMI_runtime'] = convert_seconds_to_hms_ms( AMI_end_time - AMI_start_time )

def run_modelBased_algorithm( modelDir, inputDict, outData ):
def run_model_based_algorithm( modelDir, inputDict, outData ):
feederName = [x for x in os.listdir(modelDir) if x.endswith('.omd')][0]
inputDict['feederName1'] = feederName[:-4]
path_to_omd = Path(modelDir, feederName)
tree = opendss.dssConvert.omdToTree(path_to_omd)
opendss.dssConvert.treeToDss(tree, Path(modelDir, 'circuit.dss'))
modelBased_start_time = time.time()
modelBasedHCResults = opendss.hosting_capacity_all( FNAME = Path(modelDir, 'circuit.dss'), max_test_kw=int(inputDict["modelBasedHCMaxTestkw"]), multiprocess=False)
modelBased_end_time = time.time()
model_based_start_time = time.time()
model_basedHCResults = opendss.hosting_capacity_all( FNAME = Path(modelDir, 'circuit.dss'), max_test_kw=int(inputDict["model_basedHCMaxTestkw"]), multiprocess=False)
model_based_end_time = time.time()
# - opendss.hosting_capacity_all() changes the cwd, so change it back so other code isn't affected
modelBasedHCDF = pd.DataFrame( modelBasedHCResults )
sorted_modelBasedHCDF = modelBasedHCDF.sort_values(by='bus')
sorted_modelBasedHCDF.to_csv( "output_tradHC.csv")
modelBasedHCFigure = px.bar( sorted_modelBasedHCDF, x='bus', y='max_kw', barmode='group', template='simple_white', color_discrete_sequence=["green"] )
modelBasedHCFigure.update_xaxes(categoryorder='array', categoryarray=sorted_modelBasedHCDF.bus.values)
color_df = sorted_modelBasedHCDF[['bus','max_kw']]
color_df.to_csv(Path(modelDir, 'color_by_modelBased.csv'), index=False)
attachment_keys = {
"coloringFiles": {
"color_by_modelBased.csv": {
"csv": "<content>",
"colorOnLoadColumnIndex": "1"
},
}
}
data = Path(modelDir, 'color_by_modelBased.csv').read_text()
attachment_keys['coloringFiles']['color_by_modelBased.csv']['csv'] = data
omd = json.load(open(path_to_omd))
new_path = Path(modelDir, 'color_test.omd')
omd['attachments'] = attachment_keys
with open(new_path, 'w+') as out_file:
json.dump(omd, out_file, indent=4)
omf.geo.map_omd(new_path, modelDir, open_browser=False )

outData['modelBasedHCMap'] = open(Path(modelDir, "geoJson_offline.html"), 'r' ).read()
outData['modelBasedGraphData'] = json.dumps(modelBasedHCFigure, cls=py.utils.PlotlyJSONEncoder )
outData['modelBasedHCTableHeadings'] = sorted_modelBasedHCDF.columns.values.tolist()
outData['modelBasedHCTableValues'] = (list(sorted_modelBasedHCDF.itertuples(index=False, name=None)))
outData['modelBasedRuntime'] = convert_seconds_to_hms_ms( modelBased_end_time - modelBased_start_time )
outData['modelBasedHCResults'] = modelBasedHCResults

model_basedHCDF = pd.DataFrame( model_basedHCResults )
sorted_model_basedHCDF = model_basedHCDF.sort_values(by='bus')
sorted_model_basedHCDF.to_csv( "output_tradHC.csv")
model_basedHCFigure = px.bar( sorted_model_basedHCDF, x='bus', y='max_kw', barmode='group', template='simple_white', color_discrete_sequence=["green"] )
model_basedHCFigure.update_xaxes(categoryorder='array', categoryarray=sorted_model_basedHCDF.bus.values)
# These files need to be made for coloring the map.
color_df = sorted_model_basedHCDF[['bus','max_kw']]
color_df.to_csv(Path(modelDir, 'color_by_model_based.csv'), index=False)
outData['model_basedGraphData'] = json.dumps(model_basedHCFigure, cls=py.utils.PlotlyJSONEncoder )
outData['model_basedHCTableHeadings'] = sorted_model_basedHCDF.columns.values.tolist()
outData['model_basedHCTableValues'] = (list(sorted_model_basedHCDF.itertuples(index=False, name=None)))
outData['model_basedRuntime'] = convert_seconds_to_hms_ms( model_based_end_time - model_based_start_time )
outData['model_basedHCResults'] = model_basedHCResults

def runtimeEstimate(modelDir):
''' Estimated runtime of model in minutes. '''
@@ -185,9 +270,13 @@ def work(modelDir, inputDict):
if inputDict['runAmiAlgorithm'] == 'on':
run_ami_algorithm(modelDir, inputDict, outData)
if inputDict.get('runModelBasedAlgorithm') == 'on':
run_modelBased_algorithm(modelDir, inputDict, outData)
run_model_based_algorithm(modelDir, inputDict, outData)
if inputDict.get('runDownlineAlgorithm') == 'on':
run_downline_load_algorithm( modelDir, inputDict, outData)

# TODO: if all of the algorithm flags are off, no map is generated.
hosting_capacity_map(modelDir=modelDir, inputDict=inputDict, outData=outData)

outData['stdout'] = "Success"
outData['stderr'] = ""
return outData
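One way the TODO above could be handled (a sketch, not part of this commit): skip map generation entirely when none of the algorithm flags is on, since there would be nothing to color.

if any(inputDict.get(flag) == 'on' for flag in ('runAmiAlgorithm', 'runModelBasedAlgorithm', 'runDownlineAlgorithm')):
	hosting_capacity_map(modelDir=modelDir, inputDict=inputDict, outData=outData)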
@@ -212,7 +301,7 @@ def new(modelDir):
"runModelBasedAlgorithm": 'on',
"runAmiAlgorithm": 'on',
"runDownlineAlgorithm": 'on',
"modelBasedHCMaxTestkw": 50000,
"model_basedHCMaxTestkw": 50000,
"dgInverterSetting": 'unityPF',
"der_pf": 0.95,
"vv_points": "0.8,0.44,0.92,0.44,0.98,0,1.02,0,1.08,-0.44,1.2,-0.44",
