"""CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters. 
Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
updates of the code. See also: http://www.carbontracker.eu. 

This program is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation, 
version 3. This program is distributed in the hope that it will be useful, but 
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS 
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 

You should have received a copy of the GNU General Public License along with this 
program. If not, see <http://www.gnu.org/licenses/>."""
#!/usr/bin/env python
# cartesius.py

"""
Author : peters 

Revision History:
File created on 06 Sep 2010.

"""

import logging
import subprocess

from da.baseclasses.platform import Platform

std_joboptions = {'jobname': 'test',
                  'jobaccount': 'co2',
                  'jobtype': 'serial',
                  'jobshell': '/bin/sh',
                  'depends': '',
                  'jobtime': '24:00:00',
                  'jobinput': '/dev/null',
                  'jobnodes': '1',
                  'jobtasks': '',
                  'modulenetcdf': 'netcdf/4.1.2',
                  'networkMPI': '',
                  'jobqueue': 'normal'}


class CartesiusPlatform(Platform):
    def __init__(self):
        self.ID = 'cartesius'    # the identifier gives the platform name
        self.version = '1.0'     # the platform version used


    def give_blocking_flag(self):
        """
        Returns a blocking flag, which is important when TM5 is submitted through a queue system:
        the Python CTDAS code is then forced to wait until the TM5 run has finished.

            - on Huygens: return "-s"
            - on Maunaloa: return "" (no queue available)
            - on Jet/Zeus: return
        """
        return ""

    def give_queue_type(self):
        """
        Return a queue type depending on whether your computer system has a queue system, or whether
        you prefer to run in the foreground. On most large systems using the queue is mandatory if you
        run a large job.
            - on Huygens: return "queue"
            - on Maunaloa: return "foreground" (no queue available)
            - on Jet/Zeus: return

        """
        return "foreground"

    def get_job_template(self, joboptions={}, block=False):
        """ 
        Returns the job template for a given computing system, filled with the options from the dictionary
        provided as argument. The job template is the preamble of a job script that can be submitted to a
        queue on your platform; examples of popular queuing systems are:
            - SLURM
            - SGE
            - MOAB
            - XGrid

        Job options are passed through a dictionary; every key that occurs literally in the template below
        is replaced by its value, and keys that are not supplied fall back to the defaults in std_joboptions.
        For instance, passing {'jobname': 'test'} fills in the ``#SBATCH -J jobname`` line.

        An extra option ``block`` has been added that allows the job template to be configured to block the
        current job until the job submitted with this template has completed fully.
        """
        
        #template = """## \n"""+ \
        #           """## This is a set of dummy names, to be replaced by values from the dictionary \n"""+ \
        #           """## Please make your own platform specific template with your own keys and place it in a subfolder of the da package.\n """+ \
        #           """## \n"""+ \
        #           """ \n"""+ \
        #           """#$ jobname \n"""+ \
        #           """#$ jobaccount \n"""+ \
        #           """#$ jobnodes \n"""+ \
        #           """#$ jobtime \n"""+ \
        #           """#$ jobshell \n"""+ \
        #           """\n"""+ \
        #           """source /usr/bin/sh\n"""+ \
        #           """module load python\n"""+ \
        #           """\n"""

       
        template = """#!/bin/bash \n""" + \
                   """## \n""" + \
                   """## This is a set of dummy names, to be replaced by values from the dictionary \n""" + \
                   """## Please make your own platform specific template with your own keys and place it in a subfolder of the da package.\n """ + \
                   """## \n""" + \
Peters, Wouter's avatar
Peters, Wouter committed
98
                   """#SBATCH -J jobname \n""" + \
99
                   """#SBATCH -p jobqueue \n""" + \
Peters, Wouter's avatar
Peters, Wouter committed
100
101
102
                   """#SBATCH -n jobnodes \n""" + \
                   """#SBATCH -t jobtime \n""" + \
                   """#SBATCH -o joblog \n""" + \
103
                   """module load python\n""" + \
Peters, Wouter's avatar
Peters, Wouter committed
104
                   """module load nco\n""" + \
105
106
107
108
109
110
		   """\n"""

        if 'depends' in joboptions:
            template += """#$ -hold_jid depends \n"""

        # First replace from passed dictionary
        for k, v in list(joboptions.items()):
            while k in template:
                template = template.replace(k, v)

        # Fill remaining values with std_options
        for k, v in list(std_joboptions.items()):
            while k in template:
                template = template.replace(k, v)

        return template
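
    # Illustrative sketch of the key-replacement mechanism above (the option values used
    # here are arbitrary examples, not values required by CTDAS): with the defaults in
    # std_joboptions, a call such as
    #
    #   CartesiusPlatform().get_job_template({'jobname': 'test', 'jobtime': '01:00:00'})
    #
    # returns a preamble in which "#SBATCH -J jobname" has become "#SBATCH -J test" and
    # "#SBATCH -t jobtime" has become "#SBATCH -t 01:00:00", while unspecified keys are
    # filled from std_joboptions (e.g. "#SBATCH -p jobqueue" becomes "#SBATCH -p normal").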


    def submit_job(self, jobfile, joblog=None, block=False):
        """ This method submits a jobfile to the queue, and returns the queue ID """


        #cmd     = ["llsubmit","-s",jobfile]
        #msg = "A new task will be started (%s)"%cmd  ; logging.info(msg)

        if block:
            cmd = ["salloc",'-n',std_joboptions['jobnodes'],'-t',std_joboptions['jobtime'], jobfile]
            logging.info("A new task will be started (%s)" % cmd)
            output = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
            logging.info(output)
135
            print('output', output)
136
            jobid = output.split()[-1]             
137
            print('jobid', jobid)
138
139
        else:
            cmd = ["sbatch", jobfile]
            logging.info("A new job will be submitted (%s)" % cmd)
            output = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0].decode()
            logging.info(output)
            # sbatch reports "Submitted batch job <id>"; the job id is the last token of that line
            jobid = output.split()[-1]
            
        return jobid







#        jobid   = output.split()[2]
#        retcode = output.split()[-1]
#	
#         #for huygens
#	print 'output', output
#	test   = output.split()[3]
#        dummy, jobid =test.split('nl.')
#	jobid='%s%s' %('"',jobid)
#	submitmsg ='%s%s%s'%(output.split()[4],output.split()[5],output.split()[6])
#        if submitmsg=='hasbeensubmitted.':
#	    retcode=2
#        print 'retcode',submitmsg,retcode	 
#        return retcode
#
#    def KillJob(self,jobid):
#        """ This method kills a running job """
#
#        output = subprocess.Popen(['qdel',jobid], stdout=subprocess.PIPE).communicate()[0]  ; logging.info(output)
#
#        return output
#
#    def StatJob(self,jobid):
#        """ This method gets the status of a running job """
#        import subprocess
#
#        #output = subprocess.Popen(['sgestat'], stdout=subprocess.PIPE).communicate()[0]  ; logging.info(output)
#
#        return ''
   


if __name__ == "__main__":
    pass
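
    # Usage sketch (illustrative only; assumes a SLURM environment and the CTDAS 'da'
    # package on the PYTHONPATH; the job name and file name below are arbitrary examples):
    #
    #   platform = CartesiusPlatform()
    #   preamble = platform.get_job_template({'jobname': 'ctdas_demo', 'jobtime': '00:30:00'})
    #   with open('ctdas_demo.jb', 'w') as jobfile:
    #       jobfile.write(preamble + 'echo "hello from cartesius"\n')
    #   jobid = platform.submit_job('ctdas_demo.jb')              # non-blocking, via sbatch
    #   # platform.submit_job('ctdas_demo.jb', block=True)        # blocking, via salloc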