Commit d66b83db authored by Roelofsen, Hans

adding Azure batch scripting tailored towards ViewScape

parent ee5c212b
@@ -8,56 +8,57 @@ call conda activate mrt
REM OUTPUTS
SET jaar=2018
SET og_vlak_out=OG%jaar%_vlak_250cm.tif
SET og_lijn_out=OG%jaar%_lijn_250cm.tif
SET BBOut=BB%jaar%_250cm.tif
SET T10VOut=t10_%jaar%_vlak_sel
SET T10LOut=t10_%jaar%_lijn_sel
SET out1=
REM DIRECTORIES
SET OGDir=w:/PROJECTS/Landschapsmonitor/cIndicatoren/OpgaandGroen/a_Geodata
SET BBDir=w:/PROJECTS/Landschapsmonitor/cIndicatoren/Bebouwing/Geodata_Kadaster_22juni_2021
if %jaar%==2018 (SET T10Dir=W:/PROJECTS/GeoDeskData/TOP10NL/TOP10NL_2018-sept/TOP10NL_Kopie.gdb) else (SET T10Dir=W:/PROJECTS/GeoDeskData/TOP10NL/TOP10NL_2019_Sep/TOP10NL_uncompr.gdb)
if %jaar%==2018 (SET T10Dir=W:/PROJECTS/GeoDeskData/TOP10NL/TOP10NL_november2017/TOP10NL.gdb) else (SET T10Dir=W:/PROJECTS/GeoDeskData/TOP10NL/TOP10NL_2019_Sep/TOP10NL_uncompr.gdb)
SET scratch_dir=c:/apps/temp_geodata/openheid21/Data/b_intern/scratch
SET OutDir=w:/PROJECTS/Landschapsmonitor/cIndicatoren/Openheid/c_viewscape_input/opgaand_elementen/b_methode_HR_2021/v%jaar%/c_final
SET gdal_dir=C:/Program Files/QGIS 3.10/apps/Python37/Scripts
REM minimum number of 2.5 m cells within a 25 m cell to qualify as 'gesloten' (closed)
SET drempelwaarde_vlakken=30
SET drempelwaarde_lijnen=8
SET drempelwaarde_vlakken=6
SET drempelwaarde_lijnen=2
SET drempelwaarde_combi=6
SET timestamp=%date:~0, 4%%date:~5,2%%date:~8,2%t%time:~0,2%%time:~3,2%%time:~6,2%
REM Opgaand Groen
gdal_rasterize -at -l Vlakken -of GTiff -te 10000 300000 280000 625000 -tr 2.5 2.5 -ts 108000 130000 -ot Byte -a_srs epsg:28992 -co COMPRESS=LZW -burn 1 %OGDir%/MonitoringsbronOpgaandGroen_0101%jaar%.gdb %scratch_dir%/%og_vlak_out%
gdal_rasterize -at -l Lijnen -of GTiff -te 10000 300000 280000 625000 -tr 2.5 2.5 -ts 108000 130000 -ot Byte -a_srs epsg:28992 -co COMPRESS=LZW -burn 1 %OGDir%/MonitoringsbronOpgaandGroen_0101%jaar%.gdb %scratch_dir%/%og_lijn_out%
REM 1 RASTERIZE Opgaand Groen
gdal_rasterize -at -l Vlakken -of GTiff -te 10000 300000 280000 625000 -tr 2.5 2.5 -ot Byte -a_srs epsg:28992 -co COMPRESS=LZW -burn 1 %OGDir%/MonitoringsbronOpgaandGroen_0101%jaar%.gdb %scratch_dir%/%og_vlak_out%
gdal_rasterize -at -l Lijnen -of GTiff -te 10000 300000 280000 625000 -tr 2.5 2.5 -ot Byte -a_srs epsg:28992 -co COMPRESS=LZW -burn 1 %OGDir%/MonitoringsbronOpgaandGroen_0101%jaar%.gdb %scratch_dir%/%og_lijn_out%
REM Bebouwing
REM RASTERIZE Bebouwing
gdal_rasterize -at -l Monitoringsbron%jaar% -of GTiff -te 10000 300000 280000 625000 -tr 2.5 2.5 -ot Byte -a_srs epsg:28992 -co COMPRESS=LZW -burn 1 %BBDir%/MonitoringsbronBebouwing_2018-2019_dd20120325.gdb %scratch_dir%/%BBOut%
REM Top10 Vlak
ogr2ogr -sql "SELECT * FROM TERREIN_VLAK WHERE TYPELANDGEBRUIK IN ('fruitkwekerij', 'boomgaard', 'boomkwekerij')" -select TYPELANDGEBRUIK %scratch_dir%/%T10VOut%.shp %T10Dir% TERREIN_VLAK
REM RASTERIZE Top10 Vlak
ogr2ogr -sql "SELECT * FROM TERREIN_VLAK WHERE TYPELANDGEBRUIK IN ('fruitkwekerij', 'boomgaard', 'boomkwekerij', 'dodenakker met bos')" -select TYPELANDGEBRUIK %scratch_dir%/%T10VOut%.shp %T10Dir% TERREIN_VLAK
gdal_rasterize -at -of GTiff -te 10000 300000 280000 625000 -tr 2.5 2.5 -ot Byte -a_srs epsg:28992 -co COMPRESS=LZW -burn 1 %scratch_dir%/%T10VOut%.shp %scratch_dir%/%T10VOut%.tif
REM Top10 Lijn
REM RASTERIZE Top10 Lijn
ogr2ogr -sql "SELECT * FROM INRICHTINGSELEMENT_LIJN WHERE TYPEINRICHTINGSELEMENT IN ('muur', 'geluidswering')" -select TYPEINRICHTINGSELEMENT %scratch_dir%/%T10LOut%.shp %T10Dir% INRICHTINGSELEMENT_LIJN
gdal_rasterize -at -of GTiff -te 10000 300000 280000 625000 -tr 2.5 2.5 -ot Byte -a_srs epsg:28992 -co COMPRESS=LZW -burn 1 %scratch_dir%/%T10LOut%.shp %scratch_dir%/%T10LOut%.tif
REM Add polygon-type (vlakvormige) source rasters together
REM COMBINE Add polygon-type (vlakvormige) source rasters together
python "%gdal_dir%/gdal_calc.py" --co COMPRESS=LZW -A %scratch_dir%/%og_vlak_out% -B %scratch_dir%/%BBOut% -C %scratch_dir%/%T10VOut%.tif --outfile=%scratch_dir%/vlakken_%jaar%_combi.tif --calc="numpy.where((A+B+C)>=1, 1, 0)"
REM Add line-type (lijnvormige) source rasters together
REM COMBINE Add line-type (lijnvormige) source rasters together
python "%gdal_dir%/gdal_calc.py" --co COMPRESS=LZW -A %scratch_dir%/%og_lijn_out% -B %scratch_dir%/%T10LOut%.tif --outfile=%scratch_dir%/lijnen_%jaar%_combi.tif --calc="numpy.where((A+B)>=1, 1, 0)"
REM Aggregate lijnen and vlakken to 25m with SUM aggregation method
REM AGGREGATE lijnen and vlakken to 25m with SUM aggregation method
gdalwarp -co COMPRESS=LZW -tr 25 25 -te 10000 300000 280000 625000 -r sum %scratch_dir%/vlakken_%jaar%_combi.tif %scratch_dir%/vlakken_combi_%jaar%_25m.tif
gdalwarp -co COMPRESS=LZW -tr 25 25 -te 10000 300000 280000 625000 -r sum %scratch_dir%/lijnen_%jaar%_combi.tif %scratch_dir%/lijnen_combi_%jaar%_25m.tif
REM Combine lines and polygons to catch areas where neither the lines nor the polygons alone exceed the threshold(s)
python "%gdal_dir%/gdal_calc.py" --co COMPRESS=LZW -A %scratch_dir%/vlakken_combi_%jaar%_25m.tif -B %scratch_dir%/lijnen_combi_%jaar%_25m.tif --outfile=%scratch_dir%/lijn_vlak_combi_%jaar%_25m.tif --calc="A+B"
REM Apply threshold
python "%gdal_dir%/gdal_calc.py" --co COMPRESS=LZW -A %scratch_dir%/vlakken_combi_%jaar%_25m.tif --outfile=%scratch_dir%/vlakken_%jaar%_gte_th.tif --calc="numpy.where(A >= %drempelwaarde_vlakken%, 1, 0)"
python "%gdal_dir%/gdal_calc.py" --co COMPRESS=LZW -A %scratch_dir%/lijnen_combi_%jaar%_25m.tif --outfile=%scratch_dir%/lijnen_%jaar%_gte_th.tif --calc="numpy.where(A >= %drempelwaarde_lijnen%, 1, 0)"
python "%gdal_dir%/gdal_calc.py" --co COMPRESS=LZW -A %scratch_dir%/lijn_vlak_combi_%jaar%_25m.tif --outfile=%scratch_dir%/lijnen_vlakken_%jaar%_gte_th.tif --calc="numpy.where(A >= %drempelwaarde_combi%, 1, 0)"
REM Combine. Output pixel value 2 means 'gesloten' (closed), 1 means 'open'.
python "%gdal_dir%/gdal_calc.py" --co COMPRESS=LZW --format EHdr --type Float32 -A %scratch_dir%/vlakken_%jaar%_gte_th.tif -B %scratch_dir%/lijnen_%jaar%_gte_th.tif --outfile=%OutDir%/opgaande_elementen_%jaar%_%timestamp%.flt --calc="numpy.where((A+B) >=1, 2, 1)"
python "%gdal_dir%/gdal_calc.py" --co COMPRESS=LZW --format EHdr --type Float32 -A %scratch_dir%/vlakken_%jaar%_gte_th.tif -B %scratch_dir%/lijnen_%jaar%_gte_th.tif -C %scratch_dir%/lijnen_vlakken_%jaar%_gte_th.tif--outfile=%OutDir%/opgaande_elementen_%jaar%_%timestamp%.flt --calc="numpy.where((A+B+C) >=1, 2, 1)"
pause
\ No newline at end of file
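For reference, a minimal Python/numpy sketch (editorial, not part of the commit) of the classification that the gdal_calc.py calls above perform. Small arrays stand in for the 25 m SUM-aggregated count rasters, and the thresholds are the drempelwaarde values set in the script:

import numpy as np

# Hypothetical 25 m count grids: each cell holds the number of occupied
# 2.5 m subcells (0..100) after the `gdalwarp -r sum` aggregation.
vlakken = np.array([[0, 12], [3, 40]])
lijnen = np.array([[0, 1], [2, 5]])
combi = vlakken + lijnen

TH_VLAK, TH_LIJN, TH_COMBI = 6, 2, 6  # drempelwaarde vlakken / lijnen / combi

# A cell counts as 'gesloten' (closed) when any of the three thresholds is met;
# the script encodes closed as 2 and open as 1 in a Float32 EHdr grid.
klasse = np.where((vlakken >= TH_VLAK) | (lijnen >= TH_LIJN) | (combi >= TH_COMBI), 2, 1).astype(np.float32)
print(klasse)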
# DriveConnect.ps1 (the script invoked from the task script below): mount the
# Azure file share as drive Q so task output can be copied off the node.
$connectTestResult = Test-NetConnection -ComputerName viewscapediag.file.core.windows.net -Port 445
if ($connectTestResult.TcpTestSucceeded) {
# Save the password so the drive will persist on reboot
cmd.exe /C "cmdkey /add:`"viewscapediag.file.core.windows.net`" /user:`"localhost\viewscapediag`" /pass:`"WaxZFarUCCOvJPdzH36uGroOMYwqOBzpEkk7vudY2K8dhryp0rihBPtNXgrIO8s4xChT8axaDVAWkRBdnbgohQ==`""
# Mount the drive
New-PSDrive -Name Q -PSProvider FileSystem -Root "\\viewscapediag.file.core.windows.net\viewscapedata" -Persist
} else {
Write-Error -Message "Unable to reach the Azure storage account via port 445. Check to make sure your organization or ISP is not blocking port 445, or use Azure P2S VPN, Azure S2S VPN, or Express Route to tunnel SMB traffic over a different port."
}
\ No newline at end of file
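A rough Python equivalent (editorial sketch) of the Test-NetConnection check above, in case the reachability test is ever needed outside PowerShell:

import socket

def smb_reachable(host="viewscapediag.file.core.windows.net", port=445, timeout=5):
    """Return True when the SMB endpoint accepts a TCP connection on port 445."""
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False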
REM Start task (presumably starttask.bat, referenced by create_pool below):
REM stage the application files in the node's shared directory and unpack the data archive.
copy *.* %AZ_BATCH_NODE_SHARED_DIR%
cd %AZ_BATCH_NODE_SHARED_DIR%
7za x DataFiles.7z
\ No newline at end of file
REM Per-task script (presumably task.bat, invoked by add_tasks below with
REM %1 = run .ini file, %2 = output name, %3 = output archive name).
cd %AZ_BATCH_TASK_WORKING_DIR%
fart -i -r %1 SHARED_DIR %AZ_BATCH_NODE_SHARED_DIR%
fart -i -r %1 WORKING_DIR %AZ_BATCH_TASK_WORKING_DIR%
fart -i -r %1 OUTPUT_NAME %2
REM 7za.exe a -r -t7z -m0=LZMA2:d64k:fb32 -ms=8m -mmt=30 -mx=9 "%AZ_BATCH_TASK_WORKING_DIR%\%3" "%AZ_BATCH_TASK_WORKING_DIR%\*.*"
ViewScapeCon.exe %AZ_BATCH_TASK_WORKING_DIR%\%1
PowerShell -NoProfile -ExecutionPolicy Bypass -Command "& 'DriveConnect.ps1'"
xcopy /e /i /y /q %AZ_BATCH_TASK_WORKING_DIR%\*.* "\\viewscapediag.file.core.windows.net\viewscapedata\!Results\*.*"
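The three fart calls above perform a plain search-and-replace of the SHARED_DIR, WORKING_DIR and OUTPUT_NAME placeholders in the run file (%1) before ViewScapeCon.exe runs it. An equivalent editorial sketch in Python (simplified: fart -i matches case-insensitively, this does not):

import os

def expand_runfile(path, output_name):
    # In-place placeholder expansion on the run .ini, mirroring `fart -i -r`.
    with open(path) as fh:
        text = fh.read()
    text = (text.replace("SHARED_DIR", os.environ["AZ_BATCH_NODE_SHARED_DIR"])
                .replace("WORKING_DIR", os.environ["AZ_BATCH_TASK_WORKING_DIR"])
                .replace("OUTPUT_NAME", output_name))
    with open(path, "w") as fh:
        fh.write(text)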
; ViewScape parameter file (presumably Param_aug2011.ini, referenced via
; projectname in the run files below).
[ViewScape01]
Eyelevel = 1.50
Foreground = 0.0
Middleground = 0.0
MaxDistance = 2200
Alpha=1.0
ModelType=3
VisibleWidthFraction=0.0
OwnCellCanBlock=false
MaxSlope=0.06
TypeMaxHeight=0.50
AllPointsOut=0
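For illustration only (ViewScapeCon itself is a compiled executable), the parameter block can be read back with Python's configparser; the file name here is an assumption taken from the projectname entries in the run files below:

import configparser

cfg = configparser.ConfigParser()
cfg.read("Param_aug2011.ini")  # assumed file name, per the run files below
params = cfg["ViewScape01"]
print(float(params["Eyelevel"]), int(params["MaxDistance"]))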
"""
Python interface to MS Azure Batch and Blob accounts, configured to run Viewscape in batch mode.
Original script: https://docs.microsoft.com/en-us/azure/batch/quick-run-python
Modified for Viewscape: Rene Jochem, WEnR
Further mods: Hans Roelofsen, WEnR, 16-Aug-2021
"""
from __future__ import print_function
import datetime
import io
import os
import sys
import time
import config
import argparse
try:
input = raw_input
except NameError:
pass
import azure.storage.blob as azureblob
import azure.batch.batch_service_client as batch
import azure.batch.batch_auth as batchauth
import azure.batch.models as batchmodels
sys.path.append('.')
sys.path.append('..')
# Update the Batch and Storage account credential strings in config.py with values
# unique to your accounts. These are used when constructing connection strings
# for the Batch and Storage client objects.
def query_yes_no(question, default="yes"):
"""
Prompts the user for yes/no input, displaying the specified question text.
:param str question: The text of the prompt for input.
:param str default: The default if the user hits <ENTER>. Acceptable values
are 'yes', 'no', and None.
:rtype: str
:return: 'yes' or 'no'
"""
valid = {'y': 'yes', 'n': 'no'}
if default is None:
prompt = ' [y/n] '
elif default == 'yes':
prompt = ' [Y/n] '
elif default == 'no':
prompt = ' [y/N] '
else:
raise ValueError("Invalid default answer: '{}'".format(default))
while 1:
choice = input(question + prompt).lower()
if default and not choice:
return default
try:
return valid[choice[0]]
except (KeyError, IndexError):
print("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
def print_batch_exception(batch_exception):
"""
Prints the contents of the specified Batch exception.
    :param batch_exception: The BatchErrorException to print.
"""
print('-------------------------------------------')
print('Exception encountered:')
if batch_exception.error and \
batch_exception.error.message and \
batch_exception.error.message.value:
print(batch_exception.error.message.value)
if batch_exception.error.values:
print()
for mesg in batch_exception.error.values:
print('{}:\t{}'.format(mesg.key, mesg.value))
print('-------------------------------------------')
def upload_file_to_container(block_blob_client, container_name, file_path):
"""
Uploads a local file to an Azure Blob storage container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param str file_path: The local path to the file.
:rtype: `azure.batch.models.ResourceFile`
:return: A ResourceFile initialized with a SAS URL appropriate for Batch
tasks.
"""
blob_name = os.path.basename(file_path)
print('Uploading file {} to container [{}]...'.format(file_path,
container_name))
block_blob_client.create_blob_from_path(container_name,
blob_name,
file_path)
# Obtain the SAS token for the container.
sas_token = get_container_sas_token(block_blob_client,
container_name, azureblob.BlobPermissions.READ)
sas_url = block_blob_client.make_blob_url(container_name,
blob_name,
sas_token=sas_token)
return batchmodels.ResourceFile(file_path=blob_name,
http_url=sas_url)
def get_container_sas_token(block_blob_client,
container_name, blob_permissions):
"""
Obtains a shared access signature granting the specified permissions to the
container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param BlobPermissions blob_permissions:
:rtype: str
:return: A SAS token granting the specified permissions to the container.
"""
# Obtain the SAS token for the container, setting the expiry time and
# permissions. In this case, no start time is specified, so the shared
# access signature becomes valid immediately. Expiration is in 2 hours.
container_sas_token = \
block_blob_client.generate_container_shared_access_signature(
container_name,
permission=blob_permissions,
expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2))
return container_sas_token
def get_container_sas_url(block_blob_client,
container_name, blob_permissions):
"""
Obtains a shared access signature URL that provides write access to the
    output container to which the tasks will upload their output.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param BlobPermissions blob_permissions:
:rtype: str
:return: A SAS URL granting the specified permissions to the container.
"""
# Obtain the SAS token for the container.
sas_token = get_container_sas_token(block_blob_client,
container_name, azureblob.BlobPermissions.WRITE)
# Construct SAS URL for the container
container_sas_url = "https://{}.blob.core.windows.net/{}?{}".format(config._STORAGE_ACCOUNT_NAME, container_name,
sas_token)
return container_sas_url
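The URL assembled above has this shape for the output container (SAS token elided):

    https://viewscapediag.blob.core.windows.net/output?<sas-token>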
def delete_container(block_blob_client, container_name):
"""
Delete a blob container in a block blob client
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
"""
print('Deleting container [{}]...'.format(container_name))
block_blob_client.delete_container(container_name)
def create_pool(batch_service_client, pool_id, application_files):
"""
Creates a pool of compute nodes with the specified OS settings.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str pool_id: An ID for the new pool.
    :param list application_files: A collection of resource files that the
        start task stages on each node.
"""
print('Creating pool [{}]...'.format(pool_id))
    # Create a new pool of Windows Server compute nodes using an Azure Virtual
    # Machines Marketplace image. For more information about creating pools, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
    # The start task (starttask.bat) stages the application files and unpacks
    # the data archive on each node, using an administrator user identity.
new_pool = batch.models.PoolAddParameter(
id=pool_id,
virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
image_reference=batchmodels.ImageReference(
publisher="MicrosoftWindowsServer",
offer="WindowsServer",
sku="2019-Datacenter",
version="latest"
),
node_agent_sku_id="batch.node.windows amd64"),
vm_size=config._POOL_VM_SIZE,
target_dedicated_nodes=config._DEDICATED_POOL_NODE_COUNT,
target_low_priority_nodes=config._LOW_PRIORITY_POOL_NODE_COUNT,
start_task=batchmodels.StartTask(
command_line="cmd /c starttask.bat",
resource_files=application_files,
wait_for_success=True,
user_identity=batchmodels.UserIdentity(
auto_user=batchmodels.AutoUserSpecification(
scope=batchmodels.AutoUserScope.pool,
elevation_level=batchmodels.ElevationLevel.admin)),
)
)
batch_service_client.pool.add(new_pool)
def create_job(batch_service_client, job_id, pool_id):
"""
Creates a job with the specified ID, associated with the specified pool.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The ID for the job.
:param str pool_id: The ID for the pool.
"""
print('Creating job [{}]...'.format(job_id))
job = batch.models.JobAddParameter(
id=job_id,
pool_info=batch.models.PoolInformation(pool_id=pool_id))
batch_service_client.job.add(job)
def add_tasks(batch_service_client, job_id, run_files, output_container_sas_url):
"""
Adds a task for each input file in the collection to the specified job.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The ID of the job to which to add the tasks.
    :param list run_files: A collection of run files (ResourceFile). One task
        will be created for each run file.
    :param str output_container_sas_url: A SAS URL granting write access to
        the specified Azure Blob storage container.
"""
print('Adding {} tasks to job [{}]...'.format(len(run_files), job_id))
tasks = list()
for idx, input_file in enumerate(run_files):
input_file_path = input_file.file_path
output_file_path = "VSOutFile{}.7z".format(idx)
output_id = "VSOutFile{}".format(idx)
command = "cmd /c task.bat {} {} {}".format(input_file_path, output_id, output_file_path)
tasks.append(batch.models.TaskAddParameter(
id='Task{}'.format(idx),
command_line=command,
resource_files=[input_file],
output_files=[batchmodels.OutputFile(
file_pattern=output_file_path,
destination=batchmodels.OutputFileDestination(
container=batchmodels.OutputFileBlobContainerDestination(
container_url=output_container_sas_url)),
upload_options=batchmodels.OutputFileUploadOptions(
upload_condition=batchmodels.OutputFileUploadCondition.task_success))]
)
)
batch_service_client.task.add_collection(job_id, tasks)
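For concreteness: with a first uploaded run file named ViewShed01.ini (file name assumed), the loop above creates a task with id Task0 whose command line is

    cmd /c task.bat ViewShed01.ini VSOutFile0 VSOutFile0.7z

Note that the 7za step in task.bat is commented out, so in this commit the VSOutFile0.7z pattern matches nothing and results reach the file share via xcopy instead of the output container.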
def wait_for_tasks_to_complete(batch_service_client, job_id, timeout):
"""
Returns when all tasks in the specified job reach the Completed state.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The id of the job whose tasks should be monitored.
:param timedelta timeout: The duration to wait for task completion. If all
tasks in the specified job do not reach Completed state within this time
period, an exception will be raised.
"""
timeout_expiration = datetime.datetime.now() + timeout
print("Monitoring all tasks for 'Completed' state, timeout at {}..."
.format(timeout_expiration.strftime('%H:%M:%S')), end='')
while datetime.datetime.now() < timeout_expiration:
print('.', end='')
sys.stdout.flush()
tasks = batch_service_client.task.list(job_id)
incomplete_tasks = [task for task in tasks if
task.state != batchmodels.TaskState.completed]
if not incomplete_tasks:
print()
return True
else:
time.sleep(1)
print()
raise RuntimeError("ERROR: Tasks did not reach 'Completed' state within "
"timeout period of " + str(timeout))
if __name__ == '__main__':
# TODO arguments: running time, Job ID, runfile_directory_name!
parser = argparse.ArgumentParser()
parser.add_argument('--time', help='time out period in minutes', type=int, default=30)
parser.add_argument('--id', help='job ID', type=str, default='Viewscape')
parser.add_argument('--rundir', help='directory with runfile *.ini', type=str, default='./RunFiles')
parser.add_argument('--remove', help='remove batch pool, job and blob containers', action='store_true')
args = parser.parse_args()
start_time = datetime.datetime.now().replace(microsecond=0)
print('Sample start: {}'.format(start_time))
print()
# Update config
setattr(config, '_JOB_ID', args.id)
# Create the blob client, for use in obtaining references to
# blob storage containers and uploading files to containers.
blob_client = azureblob.BlockBlobService(
account_name=config._STORAGE_ACCOUNT_NAME,
account_key=config._STORAGE_ACCOUNT_KEY)
# Use the blob client to create the containers in Azure Storage if they
# don't yet exist.
application_container_name = 'application'
run_container_name = 'run'
data_container_name = 'data'
output_container_name = 'output'
blob_client.create_container(application_container_name, fail_on_exist=False)
blob_client.create_container(run_container_name, fail_on_exist=False)
blob_client.create_container(data_container_name, fail_on_exist=False)
blob_client.create_container(output_container_name, fail_on_exist=False)
print('Container [{}] created.'.format(application_container_name))
print('Container [{}] created.'.format(run_container_name))
print('Container [{}] created.'.format(data_container_name))
print('Container [{}] created.'.format(output_container_name))
# Create a list of all application files in the ApplicationFiles directory.
application_file_paths = []
for folder, subs, files in os.walk(os.path.join(sys.path[0], 'ApplicationFiles')):
for filename in files:
application_file_paths.append(os.path.abspath(os.path.join(folder, filename)))
# Create a list of all input files in the RunFiles directory.
run_file_paths = []
for folder, subs, files in os.walk(args.rundir):
for filename in files:
if filename.endswith(".ini") and filename.startswith('ViewShed'):
run_file_paths.append(os.path.abspath(os.path.join(folder, filename)))
    # Create a list of all data files in the DataFiles directory.
data_file_paths = []
for folder, subs, files in os.walk(os.path.join(sys.path[0], 'DataFiles')):
for filename in files:
data_file_paths.append(os.path.abspath(os.path.join(folder, filename)))
# Upload the input files. This is the collection of files that are to be processed by the tasks.
run_files = [
upload_file_to_container(blob_client, run_container_name, file_path)
for file_path in run_file_paths]
# Upload the application files. This is the collection of files that are needed to run the application.
application_files = [
upload_file_to_container(blob_client, application_container_name, file_path)
for file_path in application_file_paths]
# Upload the data files. This is the collection of files that are needed to run the application.
    # data_files = [
    #     upload_file_to_container(blob_client, data_container_name, file_path)
    #     for file_path in data_file_paths]
# Obtain a shared access signature URL that provides write access to the output
# container to which the tasks will upload their output.
output_container_sas_url = get_container_sas_url(
blob_client,
output_container_name,
azureblob.BlobPermissions.WRITE)
# Create a Batch service client. We'll now be interacting with the Batch
# service in addition to Storage
credentials = batchauth.SharedKeyCredentials(config._BATCH_ACCOUNT_NAME,
config._BATCH_ACCOUNT_KEY)
batch_client = batch.BatchServiceClient(
credentials,
batch_url=config._BATCH_ACCOUNT_URL)
print('Output container [{}]...'.format(output_container_sas_url))
try:
# Create the pool that will contain the compute nodes that will execute the
# tasks.
create_pool(batch_client, config._POOL_ID, application_files)
# Create the job that will run the tasks.
create_job(batch_client, config._JOB_ID, config._POOL_ID)
# Add the tasks to the job. Pass the input files and a SAS URL
# to the storage container for output files.
add_tasks(batch_client, config._JOB_ID, run_files, output_container_sas_url)
# Pause execution until tasks reach Completed state.
wait_for_tasks_to_complete(batch_client,
config._JOB_ID,
datetime.timedelta(minutes=args.time))
print(" Success! All tasks reached the 'Completed' state within the "
"specified timeout period.")
        # Blob container, job and pool cleanup is handled below when --remove is
        # given; deleting them here as well would make the second delete fail.
# Print out some timing info
end_time = datetime.datetime.now().replace(microsecond=0)
print('Sample end: {}'.format(end_time))
print('Elapsed time: {}'.format(end_time - start_time))
print()
input('Press ENTER to exit...')
except (batchmodels.BatchErrorException, RuntimeError) as err:
if isinstance(err, batchmodels.BatchErrorException):
print_batch_exception(err)
else:
print(err)
sys.exit(1)
if args.remove:
# Delete run and data containers on blob client
delete_container(blob_client, run_container_name)
delete_container(blob_client, data_container_name)
# Clean up Batch resources
batch_client.job.delete(config._JOB_ID)
batch_client.pool.delete(config._POOL_ID)
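Typical invocation of the client script (the file name python_quickstart_client.py comes from the comment in config.py below; the flags are the argparse options defined above):

    python python_quickstart_client.py --time 60 --id Viewscape --rundir ./RunFiles --remove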
# -------------------------------------------------------------------------
#
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
# ----------------------------------------------------------------------------------
# The example companies, organizations, products, domain names,
# e-mail addresses, logos, people, places, and events depicted
# herein are fictitious. No association with any real company,
# organization, product, domain name, email address, logo, person,
# places, or events is intended or should be inferred.
# --------------------------------------------------------------------------
# Global constant variables (Azure Storage account/Batch details)
# import "config.py" in "python_quickstart_client.py "
_BATCH_ACCOUNT_NAME = 'viewscapebatch' # Your batch account name
_BATCH_ACCOUNT_KEY = 'sJ3KiVMWIE1s1/3FFrZxYZ3uOyfYBT983IAMHFegnhrjGKqFgWvkIdbGU0yBKsdJ5/c4DMeCTNxv8hvkhcsRnQ==' # Your batch account key
_BATCH_ACCOUNT_URL = 'https://viewscapebatch.westeurope.batch.azure.com' # Your batch account URL
_STORAGE_ACCOUNT_NAME = 'viewscapediag' # Your storage account name
_STORAGE_ACCOUNT_KEY = 'WaxZFarUCCOvJPdzH36uGroOMYwqOBzpEkk7vudY2K8dhryp0rihBPtNXgrIO8s4xChT8axaDVAWkRBdnbgohQ==' # Your storage account key
_POOL_ID = 'ViewscapePool' # Your Pool ID
_POOL_NODE_COUNT = 2 # Pool node count
_POOL_VM_SIZE = 'STANDARD_A1_v2' # VM Type/Size
_JOB_ID = 'Viewscape' # Job ID
_STANDARD_OUT_FILE_NAME = 'stdout.txt' # Standard Output file
_DEDICATED_POOL_NODE_COUNT = 1
_LOW_PRIORITY_POOL_NODE_COUNT = 0
azure-batch==6.0.0
azure-storage-blob==1.4.0
\ No newline at end of file
; ViewShed run files (one per task; the client script uploads files matching
; ViewShed*.ini from --rundir). Each covers one slice of viewpoint ids.
[ViewShed01]
projectname = SHARED_DIR\DataFiles\Param_aug2011.ini
casename = ViewScape01
minid = 1
maxid = 40000
dogridfilematrix = 1
viewpointsfilename = SHARED_DIR\DataFiles\punten100m_nl.shp
landscapegridname = SHARED_DIR\DataFiles\gesloten09.flt
typeheighttablename = SHARED_DIR\DataFiles\Hoogten12.dbf
elevationgridname = SHARED_DIR\DataFiles\terrein_h2o.flt
rawdatatablename = <NONE>
countviewgridname = WORKING_DIR\count_OUTPUT_NAME.flt
horizontablename = WORKING_DIR\horizon_OUTPUT_NAME.shp
landscapeareastablename = <NONE>
\ No newline at end of file
[ViewShed01]
projectname = SHARED_DIR\DataFiles\Param_aug2011.ini
casename = ViewScape01
minid = 40000
maxid = 80000
dogridfilematrix = 1
viewpointsfilename = SHARED_DIR\DataFiles\punten100m_nl.shp
landscapegridname = SHARED_DIR\DataFiles\gesloten09.flt
typeheighttablename = SHARED_DIR\DataFiles\Hoogten12.dbf
elevationgridname = SHARED_DIR\DataFiles\terrein_h2o.flt
rawdatatablename = <NONE>
countviewgridname = WORKING_DIR\count_OUTPUT_NAME.flt
horizontablename = WORKING_DIR\horizon_OUTPUT_NAME.shp
landscapeareastablename = <NONE>
\ No newline at end of file
[ViewShed01]
projectname = SHARED_DIR\DataFiles\Param_aug2011.ini
casename = ViewScape01
minid = 80000
maxid = 120000
dogridfilematrix = 1
viewpointsfilename = SHARED_DIR\DataFiles\punten100m_nl.shp
landscapegridname = SHARED_DIR\DataFiles\gesloten09.flt
typeheighttablename = SHARED_DIR\DataFiles\Hoogten12.dbf
elevationgridname = SHARED_DIR\DataFiles\terrein_h2o.flt
rawdatatablename = <NONE>
countviewgridname = WORKING_DIR\count_OUTPUT_NAME.flt
horizontablename = WORKING_DIR\horizon_OUTPUT_NAME.shp
landscapeareastablename = <NONE>
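The three run files above differ only in their minid/maxid slice (note that adjacent files share the boundary ids 40000 and 80000; whether those bounds are inclusive is up to ViewScapeCon). An editorial Python sketch that would generate them, with the ViewShedNN.ini naming an assumption consistent with the startswith('ViewShed') filter in the client script:

# Generate one run .ini per 40,000-id slice of the viewpoint shapefile.
TEMPLATE = r"""[ViewShed01]
projectname = SHARED_DIR\DataFiles\Param_aug2011.ini
casename = ViewScape01
minid = {minid}
maxid = {maxid}
dogridfilematrix = 1
viewpointsfilename = SHARED_DIR\DataFiles\punten100m_nl.shp
landscapegridname = SHARED_DIR\DataFiles\gesloten09.flt
typeheighttablename = SHARED_DIR\DataFiles\Hoogten12.dbf
elevationgridname = SHARED_DIR\DataFiles\terrein_h2o.flt
rawdatatablename = <NONE>
countviewgridname = WORKING_DIR\count_OUTPUT_NAME.flt
horizontablename = WORKING_DIR\horizon_OUTPUT_NAME.shp
landscapeareastablename = <NONE>
"""

CHUNK = 40000
for i in range(3):
    # First file starts at id 1; later files start at the previous maxid.
    with open("RunFiles/ViewShed{:02d}.ini".format(i + 1), "w") as fh:
        fh.write(TEMPLATE.format(minid=max(i * CHUNK, 1), maxid=(i + 1) * CHUNK))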