repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.sourcekey
def sourcekey(self, **kwargs): """ Return a key that specifies the name and version of a source or component """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) self._replace_none(kwargs_copy) try: return NameFactory.sourcekey_format.format(**kwargs_copy) except KeyError: return None
python
def sourcekey(self, **kwargs):
    """Return a key giving the name and version of a source or component.

    Returns None if the format string needs a key that is not supplied.
    """
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    self._replace_none(merged)
    try:
        return NameFactory.sourcekey_format.format(**merged)
    except KeyError:
        return None
[ "def", "sourcekey", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "self", ".", "_replace_none", "(", "kwargs_copy", ")", ...
Return a key that specifies the name and version of a source or component
[ "Return", "a", "key", "that", "specifies", "the", "name", "and", "version", "of", "a", "source", "or", "component" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L189-L198
train
36,300
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.galprop_gasmap
def galprop_gasmap(self, **kwargs): """ return the file name for Galprop input gasmaps """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) self._replace_none(kwargs_copy) localpath = NameFactory.galprop_gasmap_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
python
def galprop_gasmap(self, **kwargs):
    """Return the file name for a Galprop input gasmap."""
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    self._replace_none(merged)
    name = NameFactory.galprop_gasmap_format.format(**merged)
    if not kwargs.get('fullpath', False):
        return name
    return self.fullpath(localpath=name)
[ "def", "galprop_gasmap", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "self", ".", "_replace_none", "(", "kwargs_copy", ...
return the file name for Galprop input gasmaps
[ "return", "the", "file", "name", "for", "Galprop", "input", "gasmaps" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L235-L244
train
36,301
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.merged_gasmap
def merged_gasmap(self, **kwargs): """ return the file name for Galprop merged gasmaps """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) self._replace_none(kwargs_copy) localpath = NameFactory.merged_gasmap_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
python
def merged_gasmap(self, **kwargs):
    """Return the file name for a Galprop merged gasmap."""
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    self._replace_none(merged)
    name = NameFactory.merged_gasmap_format.format(**merged)
    return self.fullpath(localpath=name) if kwargs.get('fullpath', False) else name
[ "def", "merged_gasmap", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "self", ".", "_replace_none", "(", "kwargs_copy", "...
return the file name for Galprop merged gasmaps
[ "return", "the", "file", "name", "for", "Galprop", "merged", "gasmaps" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L246-L255
train
36,302
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.diffuse_template
def diffuse_template(self, **kwargs): """ return the file name for other diffuse map templates """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) self._replace_none(kwargs_copy) localpath = NameFactory.diffuse_template_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
python
def diffuse_template(self, **kwargs):
    """Return the file name for other diffuse map templates."""
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    self._replace_none(merged)
    name = NameFactory.diffuse_template_format.format(**merged)
    if not kwargs.get('fullpath', False):
        return name
    return self.fullpath(localpath=name)
[ "def", "diffuse_template", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "self", ".", "_replace_none", "(", "kwargs_copy", ...
return the file name for other diffuse map templates
[ "return", "the", "file", "name", "for", "other", "diffuse", "map", "templates" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L257-L266
train
36,303
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.spectral_template
def spectral_template(self, **kwargs): """ return the file name for spectral templates """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) localpath = NameFactory.spectral_template_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
python
def spectral_template(self, **kwargs):
    """Return the file name for a spectral template.

    NOTE(review): unlike most sibling methods, this one does not call
    self._replace_none() before formatting -- confirm that is intentional.
    """
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    name = NameFactory.spectral_template_format.format(**merged)
    return self.fullpath(localpath=name) if kwargs.get('fullpath', False) else name
[ "def", "spectral_template", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "localpath", "=", "NameFactory", ".", "spectral_t...
return the file name for spectral templates
[ "return", "the", "file", "name", "for", "spectral", "templates" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L268-L276
train
36,304
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.srcmdl_xml
def srcmdl_xml(self, **kwargs): """ return the file name for source model xml files """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) localpath = NameFactory.srcmdl_xml_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
python
def srcmdl_xml(self, **kwargs):
    """Return the file name for a source model xml file.

    NOTE(review): unlike most sibling methods, this one does not call
    self._replace_none() before formatting -- confirm that is intentional.
    """
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    name = NameFactory.srcmdl_xml_format.format(**merged)
    return self.fullpath(localpath=name) if kwargs.get('fullpath', False) else name
[ "def", "srcmdl_xml", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "localpath", "=", "NameFactory", ".", "srcmdl_xml_format...
return the file name for source model xml files
[ "return", "the", "file", "name", "for", "source", "model", "xml", "files" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L278-L286
train
36,305
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.nested_srcmdl_xml
def nested_srcmdl_xml(self, **kwargs): """ return the file name for source model xml files of nested sources """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) self._replace_none(kwargs_copy) localpath = NameFactory.nested_srcmdl_xml_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
python
def nested_srcmdl_xml(self, **kwargs):
    """Return the file name for source model xml files of nested sources."""
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    self._replace_none(merged)
    name = NameFactory.nested_srcmdl_xml_format.format(**merged)
    if not kwargs.get('fullpath', False):
        return name
    return self.fullpath(localpath=name)
[ "def", "nested_srcmdl_xml", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "self", ".", "_replace_none", "(", "kwargs_copy",...
return the file name for source model xml files of nested sources
[ "return", "the", "file", "name", "for", "source", "model", "xml", "files", "of", "nested", "sources" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L288-L297
train
36,306
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.ft1file
def ft1file(self, **kwargs): """ return the name of the input ft1 file list """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs)) self._replace_none(kwargs_copy) localpath = NameFactory.ft1file_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
python
def ft1file(self, **kwargs):
    """Return the name of the input ft1 file list."""
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    # Fall back to the derived dataset name when the caller gives none.
    merged['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
    self._replace_none(merged)
    name = NameFactory.ft1file_format.format(**merged)
    if not kwargs.get('fullpath', False):
        return name
    return self.fullpath(localpath=name)
[ "def", "ft1file", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "kwargs_copy", "[", "'dataset'", "]", "=", "kwargs", "....
return the name of the input ft1 file list
[ "return", "the", "name", "of", "the", "input", "ft1", "file", "list" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L299-L309
train
36,307
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.ft2file
def ft2file(self, **kwargs): """ return the name of the input ft2 file list """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) kwargs_copy['data_time'] = kwargs.get( 'data_time', self.dataset(**kwargs)) self._replace_none(kwargs_copy) localpath = NameFactory.ft2file_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
python
def ft2file(self, **kwargs):
    """Return the name of the input ft2 file list."""
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    # NOTE(review): the 'data_time' default comes from self.dataset(), which
    # looks asymmetric next to ft1file's 'dataset' handling -- confirm.
    merged['data_time'] = kwargs.get('data_time', self.dataset(**kwargs))
    self._replace_none(merged)
    name = NameFactory.ft2file_format.format(**merged)
    if not kwargs.get('fullpath', False):
        return name
    return self.fullpath(localpath=name)
[ "def", "ft2file", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "kwargs_copy", "[", "'data_time'", "]", "=", "kwargs", ...
return the name of the input ft2 file list
[ "return", "the", "name", "of", "the", "input", "ft2", "file", "list" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L311-L322
train
36,308
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.ccube
def ccube(self, **kwargs): """ return the name of a counts cube file """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs)) kwargs_copy['component'] = kwargs.get( 'component', self.component(**kwargs)) localpath = NameFactory.ccube_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
python
def ccube(self, **kwargs):
    """Return the name of a counts cube file.

    NOTE(review): no self._replace_none() call here, unlike mcube -- confirm.
    """
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    merged['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
    merged['component'] = kwargs.get('component', self.component(**kwargs))
    name = NameFactory.ccube_format.format(**merged)
    return self.fullpath(localpath=name) if kwargs.get('fullpath', False) else name
[ "def", "ccube", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "kwargs_copy", "[", "'dataset'", "]", "=", "kwargs", ".",...
return the name of a counts cube file
[ "return", "the", "name", "of", "a", "counts", "cube", "file" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L363-L374
train
36,309
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.mcube
def mcube(self, **kwargs): """ return the name of a model cube file """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs)) kwargs_copy['component'] = kwargs.get( 'component', self.component(**kwargs)) self._replace_none(kwargs_copy) localpath = NameFactory.mcube_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
python
def mcube(self, **kwargs):
    """Return the name of a model cube file."""
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    merged['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
    merged['component'] = kwargs.get('component', self.component(**kwargs))
    self._replace_none(merged)
    name = NameFactory.mcube_format.format(**merged)
    if not kwargs.get('fullpath', False):
        return name
    return self.fullpath(localpath=name)
[ "def", "mcube", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "kwargs_copy", "[", "'dataset'", "]", "=", "kwargs", ".",...
return the name of a model cube file
[ "return", "the", "name", "of", "a", "model", "cube", "file" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L404-L416
train
36,310
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.angprofile
def angprofile(self, **kwargs): """ return the file name for sun or moon angular profiles """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) self._replace_none(kwargs_copy) localpath = NameFactory.angprofile_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
python
def angprofile(self, **kwargs):
    """Return the file name for sun or moon angular profiles."""
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    self._replace_none(merged)
    name = NameFactory.angprofile_format.format(**merged)
    return self.fullpath(localpath=name) if kwargs.get('fullpath', False) else name
[ "def", "angprofile", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "self", ".", "_replace_none", "(", "kwargs_copy", ")",...
return the file name for sun or moon angular profiles
[ "return", "the", "file", "name", "for", "sun", "or", "moon", "angular", "profiles" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L470-L479
train
36,311
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.template_sunmoon
def template_sunmoon(self, **kwargs): """ return the file name for sun or moon template files """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs)) kwargs_copy['component'] = kwargs.get( 'component', self.component(**kwargs)) self._replace_none(kwargs_copy) localpath = NameFactory.templatesunmoon_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
python
def template_sunmoon(self, **kwargs):
    """Return the file name for sun or moon template files."""
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    merged['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
    merged['component'] = kwargs.get('component', self.component(**kwargs))
    self._replace_none(merged)
    name = NameFactory.templatesunmoon_format.format(**merged)
    if not kwargs.get('fullpath', False):
        return name
    return self.fullpath(localpath=name)
[ "def", "template_sunmoon", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "kwargs_copy", "[", "'dataset'", "]", "=", "kwar...
return the file name for sun or moon template files
[ "return", "the", "file", "name", "for", "sun", "or", "moon", "template", "files" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L481-L493
train
36,312
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.residual_cr
def residual_cr(self, **kwargs): """Return the name of the residual CR analysis output files""" kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs)) kwargs_copy['component'] = kwargs.get( 'component', self.component(**kwargs)) localpath = NameFactory.residual_cr_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
python
def residual_cr(self, **kwargs):
    """Return the name of the residual CR analysis output files.

    NOTE(review): no self._replace_none() call here, unlike mcube -- confirm.
    """
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    merged['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
    merged['component'] = kwargs.get('component', self.component(**kwargs))
    name = NameFactory.residual_cr_format.format(**merged)
    return self.fullpath(localpath=name) if kwargs.get('fullpath', False) else name
[ "def", "residual_cr", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "kwargs_copy", "[", "'dataset'", "]", "=", "kwargs", ...
Return the name of the residual CR analysis output files
[ "Return", "the", "name", "of", "the", "residual", "CR", "analysis", "output", "files" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L495-L505
train
36,313
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.galprop_rings_yaml
def galprop_rings_yaml(self, **kwargs): """ return the name of a galprop rings merging yaml file """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) self._replace_none(kwargs_copy) localpath = NameFactory.galprop_rings_yaml_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
python
def galprop_rings_yaml(self, **kwargs):
    """Return the name of a galprop rings merging yaml file."""
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    self._replace_none(merged)
    name = NameFactory.galprop_rings_yaml_format.format(**merged)
    if not kwargs.get('fullpath', False):
        return name
    return self.fullpath(localpath=name)
[ "def", "galprop_rings_yaml", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "self", ".", "_replace_none", "(", "kwargs_copy"...
return the name of a galprop rings merging yaml file
[ "return", "the", "name", "of", "a", "galprop", "rings", "merging", "yaml", "file" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L507-L516
train
36,314
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.catalog_split_yaml
def catalog_split_yaml(self, **kwargs): """ return the name of a catalog split yaml file """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) self._replace_none(kwargs_copy) localpath = NameFactory.catalog_split_yaml_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
python
def catalog_split_yaml(self, **kwargs):
    """Return the name of a catalog split yaml file."""
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    self._replace_none(merged)
    name = NameFactory.catalog_split_yaml_format.format(**merged)
    return self.fullpath(localpath=name) if kwargs.get('fullpath', False) else name
[ "def", "catalog_split_yaml", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "self", ".", "_replace_none", "(", "kwargs_copy"...
return the name of a catalog split yaml file
[ "return", "the", "name", "of", "a", "catalog", "split", "yaml", "file" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L518-L527
train
36,315
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.model_yaml
def model_yaml(self, **kwargs): """ return the name of a model yaml file """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) self._replace_none(kwargs_copy) localpath = NameFactory.model_yaml_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
python
def model_yaml(self, **kwargs):
    """Return the name of a model yaml file."""
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    self._replace_none(merged)
    name = NameFactory.model_yaml_format.format(**merged)
    if not kwargs.get('fullpath', False):
        return name
    return self.fullpath(localpath=name)
[ "def", "model_yaml", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "self", ".", "_replace_none", "(", "kwargs_copy", ")",...
return the name of a model yaml file
[ "return", "the", "name", "of", "a", "model", "yaml", "file" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L529-L538
train
36,316
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.fullpath
def fullpath(self, **kwargs): """Return a full path name for a given file """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) self._replace_none(kwargs_copy) return NameFactory.fullpath_format.format(**kwargs_copy)
python
def fullpath(self, **kwargs):
    """Return a full path name for a given file."""
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    self._replace_none(merged)
    return NameFactory.fullpath_format.format(**merged)
[ "def", "fullpath", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "self", ".", "_replace_none", "(", "kwargs_copy", ")", ...
Return a full path name for a given file
[ "Return", "a", "full", "path", "name", "for", "a", "given", "file" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L586-L592
train
36,317
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.generic
def generic(self, input_string, **kwargs): """ return a generic filename for a given dataset and component """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs)) kwargs_copy['component'] = kwargs.get( 'component', self.component(**kwargs)) self._replace_none(kwargs_copy) return input_string.format(**kwargs_copy)
python
def generic(self, input_string, **kwargs):
    """Return a generic filename for a given dataset and component.

    *input_string* is treated as a str.format template filled with the
    merged keyword context.
    """
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    merged['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
    merged['component'] = kwargs.get('component', self.component(**kwargs))
    self._replace_none(merged)
    return input_string.format(**merged)
[ "def", "generic", "(", "self", ",", "input_string", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "kwargs_copy", "[", "'dataset'", "]"...
return a generic filename for a given dataset and component
[ "return", "a", "generic", "filename", "for", "a", "given", "dataset", "and", "component" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L594-L603
train
36,318
fermiPy/fermipy
fermipy/diffuse/name_policy.py
NameFactory.make_filenames
def make_filenames(self, **kwargs): """ Make a dictionary of filenames for various types """ out_dict = dict(ft1file=self.ft1file(**kwargs), ltcube=self.ltcube(**kwargs), ccube=self.ccube(**kwargs), bexpcube=self.bexpcube(**kwargs), srcmaps=self.srcmaps(**kwargs), mcube=self.mcube(**kwargs)) return out_dict
python
def make_filenames(self, **kwargs):
    """Make a dictionary of filenames for various types."""
    builders = (
        ('ft1file', self.ft1file),
        ('ltcube', self.ltcube),
        ('ccube', self.ccube),
        ('bexpcube', self.bexpcube),
        ('srcmaps', self.srcmaps),
        ('mcube', self.mcube),
    )
    return {key: build(**kwargs) for key, build in builders}
[ "def", "make_filenames", "(", "self", ",", "*", "*", "kwargs", ")", ":", "out_dict", "=", "dict", "(", "ft1file", "=", "self", ".", "ft1file", "(", "*", "*", "kwargs", ")", ",", "ltcube", "=", "self", ".", "ltcube", "(", "*", "*", "kwargs", ")", ...
Make a dictionary of filenames for various types
[ "Make", "a", "dictionary", "of", "filenames", "for", "various", "types" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L605-L614
train
36,319
fermiPy/fermipy
fermipy/gtutils.py
create_spectrum_from_dict
def create_spectrum_from_dict(spectrum_type, spectral_pars, fn=None): """Create a Function object from a parameter dictionary. Parameters ---------- spectrum_type : str String identifying the spectrum type (e.g. PowerLaw). spectral_pars : dict Dictionary of spectral parameters. """ if fn is None: fn = pyLike.SourceFactory_funcFactory().create(str(spectrum_type)) if spectrum_type == 'PiecewisePowerLaw': build_piecewise_powerlaw(fn, spectral_pars) for k, v in spectral_pars.items(): v.setdefault('scale', 1.0) v.setdefault('min', v['value'] * 1E-3) v.setdefault('max', v['value'] * 1E3) par = fn.getParam(str(k)) vmin = min(float(v['value']), float(v['min'])) vmax = max(float(v['value']), float(v['max'])) par.setValue(float(v['value'])) par.setBounds(vmin, vmax) par.setScale(float(v['scale'])) if 'free' in v and int(v['free']) != 0: par.setFree(True) else: par.setFree(False) fn.setParam(par) return fn
python
def create_spectrum_from_dict(spectrum_type, spectral_pars, fn=None):
    """Create a Function object from a parameter dictionary.

    Parameters
    ----------
    spectrum_type : str
        String identifying the spectrum type (e.g. PowerLaw).
    spectral_pars : dict
        Dictionary of spectral parameters.
    fn : optional
        Existing function object to fill; when None one is created
        from the pyLike source factory.
    """
    if fn is None:
        fn = pyLike.SourceFactory_funcFactory().create(str(spectrum_type))

    if spectrum_type == 'PiecewisePowerLaw':
        build_piecewise_powerlaw(fn, spectral_pars)

    for name, pars in spectral_pars.items():
        # Fill in defaults for anything the caller omitted (mutates pars).
        pars.setdefault('scale', 1.0)
        pars.setdefault('min', pars['value'] * 1E-3)
        pars.setdefault('max', pars['value'] * 1E3)

        par = fn.getParam(str(name))
        value = float(pars['value'])
        # Widen the bounds when needed so the value always lies inside them.
        lower = min(value, float(pars['min']))
        upper = max(value, float(pars['max']))
        par.setValue(value)
        par.setBounds(lower, upper)
        par.setScale(float(pars['scale']))
        par.setFree('free' in pars and int(pars['free']) != 0)
        fn.setParam(par)

    return fn
[ "def", "create_spectrum_from_dict", "(", "spectrum_type", ",", "spectral_pars", ",", "fn", "=", "None", ")", ":", "if", "fn", "is", "None", ":", "fn", "=", "pyLike", ".", "SourceFactory_funcFactory", "(", ")", ".", "create", "(", "str", "(", "spectrum_type",...
Create a Function object from a parameter dictionary. Parameters ---------- spectrum_type : str String identifying the spectrum type (e.g. PowerLaw). spectral_pars : dict Dictionary of spectral parameters.
[ "Create", "a", "Function", "object", "from", "a", "parameter", "dictionary", "." ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtutils.py#L208-L248
train
36,320
fermiPy/fermipy
fermipy/gtutils.py
get_priors
def get_priors(like): """Extract priors from a likelihood object.""" npar = len(like.params()) vals = np.ones(npar) errs = np.ones(npar) has_prior = np.array([False] * npar) for i, p in enumerate(like.params()): prior = like[i].log_prior() if prior is None: continue par_names = pyLike.StringVector() prior.getParamNames(par_names) if not 'Mean' in par_names: raise Exception('Failed to find Mean in prior parameters.') if not 'Sigma' in par_names: raise Exception('Failed to find Sigma in prior parameters.') for t in par_names: if t == 'Mean': vals[i] = prior.parameter(t).getValue() if t == 'Sigma': errs[i] = prior.parameter(t).getValue() has_prior[i] = True return vals, errs, has_prior
python
def get_priors(like): """Extract priors from a likelihood object.""" npar = len(like.params()) vals = np.ones(npar) errs = np.ones(npar) has_prior = np.array([False] * npar) for i, p in enumerate(like.params()): prior = like[i].log_prior() if prior is None: continue par_names = pyLike.StringVector() prior.getParamNames(par_names) if not 'Mean' in par_names: raise Exception('Failed to find Mean in prior parameters.') if not 'Sigma' in par_names: raise Exception('Failed to find Sigma in prior parameters.') for t in par_names: if t == 'Mean': vals[i] = prior.parameter(t).getValue() if t == 'Sigma': errs[i] = prior.parameter(t).getValue() has_prior[i] = True return vals, errs, has_prior
[ "def", "get_priors", "(", "like", ")", ":", "npar", "=", "len", "(", "like", ".", "params", "(", ")", ")", "vals", "=", "np", ".", "ones", "(", "npar", ")", "errs", "=", "np", ".", "ones", "(", "npar", ")", "has_prior", "=", "np", ".", "array",...
Extract priors from a likelihood object.
[ "Extract", "priors", "from", "a", "likelihood", "object", "." ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtutils.py#L367-L402
train
36,321
fermiPy/fermipy
fermipy/gtutils.py
get_source_pars
def get_source_pars(src): """Extract the parameters associated with a pyLikelihood Source object. """ fnmap = src.getSrcFuncs() keys = fnmap.keys() if 'Position' in keys: ppars = get_function_pars(src.getSrcFuncs()[str('Position')]) elif 'SpatialDist' in keys: ppars = get_function_pars(src.getSrcFuncs()[str('SpatialDist')]) else: raise Exception('Failed to extract spatial parameters.') fn = src.getSrcFuncs()[str('Spectrum')] spars = get_function_pars(fn) for i, p in enumerate(ppars): ppars[i]['is_norm'] = False for i, p in enumerate(spars): if fn.normPar().getName() == p['name']: spars[i]['is_norm'] = True else: spars[i]['is_norm'] = False return spars, ppars
python
def get_source_pars(src): """Extract the parameters associated with a pyLikelihood Source object. """ fnmap = src.getSrcFuncs() keys = fnmap.keys() if 'Position' in keys: ppars = get_function_pars(src.getSrcFuncs()[str('Position')]) elif 'SpatialDist' in keys: ppars = get_function_pars(src.getSrcFuncs()[str('SpatialDist')]) else: raise Exception('Failed to extract spatial parameters.') fn = src.getSrcFuncs()[str('Spectrum')] spars = get_function_pars(fn) for i, p in enumerate(ppars): ppars[i]['is_norm'] = False for i, p in enumerate(spars): if fn.normPar().getName() == p['name']: spars[i]['is_norm'] = True else: spars[i]['is_norm'] = False return spars, ppars
[ "def", "get_source_pars", "(", "src", ")", ":", "fnmap", "=", "src", ".", "getSrcFuncs", "(", ")", "keys", "=", "fnmap", ".", "keys", "(", ")", "if", "'Position'", "in", "keys", ":", "ppars", "=", "get_function_pars", "(", "src", ".", "getSrcFuncs", "(...
Extract the parameters associated with a pyLikelihood Source object.
[ "Extract", "the", "parameters", "associated", "with", "a", "pyLikelihood", "Source", "object", "." ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtutils.py#L405-L434
train
36,322
fermiPy/fermipy
fermipy/gtutils.py
SummedLikelihood.nFreeParams
def nFreeParams(self): """Count the number of free parameters in the active model.""" nF = 0 pars = self.params() for par in pars: if par.isFree(): nF += 1 return nF
python
def nFreeParams(self): """Count the number of free parameters in the active model.""" nF = 0 pars = self.params() for par in pars: if par.isFree(): nF += 1 return nF
[ "def", "nFreeParams", "(", "self", ")", ":", "nF", "=", "0", "pars", "=", "self", ".", "params", "(", ")", "for", "par", "in", "pars", ":", "if", "par", ".", "isFree", "(", ")", ":", "nF", "+=", "1", "return", "nF" ]
Count the number of free parameters in the active model.
[ "Count", "the", "number", "of", "free", "parameters", "in", "the", "active", "model", "." ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtutils.py#L490-L497
train
36,323
fermiPy/fermipy
fermipy/gtutils.py
BinnedAnalysis.Ts2
def Ts2(self, srcName, reoptimize=False, approx=True, tol=None, MaxIterations=10, verbosity=0): """Computes the TS value for a source indicated by "srcName." If "reoptimize=True" is selected this function will reoptimize the model up to "MaxIterations" given the tolerance "tol" (default is the tolerance selected for the overall fit). If "appox=True" is selected (the default) it will renormalize the model (see _renorm). """ saved_state = LikelihoodState(self) if verbosity > 0: print("*** Start Ts_dl ***") source_attributes = self.getExtraSourceAttributes() self.logLike.syncParams() src = self.logLike.getSource(srcName) self._ts_src = src freeParams = pyLike.DoubleVector() self.logLike.getFreeParamValues(freeParams) logLike1 = self.logLike.value() self.scaleSource(srcName, 1E-10) logLike0 = self.logLike.value() if tol is None: tol = self.tol if reoptimize: if verbosity > 0: print("** Do reoptimize") optFactory = pyLike.OptimizerFactory_instance() myOpt = optFactory.create(self.optimizer, self.logLike) Niter = 1 while Niter <= MaxIterations: try: myOpt.find_min(0, tol) break except RuntimeError as e: print(e) if verbosity > 0: print("** Iteration :", Niter) Niter += 1 else: if approx: try: self._renorm() except ZeroDivisionError: pass self.logLike.syncParams() logLike0 = max(self.logLike.value(), logLike0) Ts_value = 2 * (logLike1 - logLike0) self.scaleSource(srcName, 1E10) self.logLike.setFreeParamValues(freeParams) self.model = SourceModel(self.logLike) for src in source_attributes: self.model[src].__dict__.update(source_attributes[src]) saved_state.restore() self.logLike.value() return Ts_value
python
def Ts2(self, srcName, reoptimize=False, approx=True, tol=None, MaxIterations=10, verbosity=0): """Computes the TS value for a source indicated by "srcName." If "reoptimize=True" is selected this function will reoptimize the model up to "MaxIterations" given the tolerance "tol" (default is the tolerance selected for the overall fit). If "appox=True" is selected (the default) it will renormalize the model (see _renorm). """ saved_state = LikelihoodState(self) if verbosity > 0: print("*** Start Ts_dl ***") source_attributes = self.getExtraSourceAttributes() self.logLike.syncParams() src = self.logLike.getSource(srcName) self._ts_src = src freeParams = pyLike.DoubleVector() self.logLike.getFreeParamValues(freeParams) logLike1 = self.logLike.value() self.scaleSource(srcName, 1E-10) logLike0 = self.logLike.value() if tol is None: tol = self.tol if reoptimize: if verbosity > 0: print("** Do reoptimize") optFactory = pyLike.OptimizerFactory_instance() myOpt = optFactory.create(self.optimizer, self.logLike) Niter = 1 while Niter <= MaxIterations: try: myOpt.find_min(0, tol) break except RuntimeError as e: print(e) if verbosity > 0: print("** Iteration :", Niter) Niter += 1 else: if approx: try: self._renorm() except ZeroDivisionError: pass self.logLike.syncParams() logLike0 = max(self.logLike.value(), logLike0) Ts_value = 2 * (logLike1 - logLike0) self.scaleSource(srcName, 1E10) self.logLike.setFreeParamValues(freeParams) self.model = SourceModel(self.logLike) for src in source_attributes: self.model[src].__dict__.update(source_attributes[src]) saved_state.restore() self.logLike.value() return Ts_value
[ "def", "Ts2", "(", "self", ",", "srcName", ",", "reoptimize", "=", "False", ",", "approx", "=", "True", ",", "tol", "=", "None", ",", "MaxIterations", "=", "10", ",", "verbosity", "=", "0", ")", ":", "saved_state", "=", "LikelihoodState", "(", "self", ...
Computes the TS value for a source indicated by "srcName." If "reoptimize=True" is selected this function will reoptimize the model up to "MaxIterations" given the tolerance "tol" (default is the tolerance selected for the overall fit). If "appox=True" is selected (the default) it will renormalize the model (see _renorm).
[ "Computes", "the", "TS", "value", "for", "a", "source", "indicated", "by", "srcName", "." ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtutils.py#L658-L715
train
36,324
fermiPy/fermipy
fermipy/jobs/scatter_gather.py
ScatterGather._make_scatter_logfile_name
def _make_scatter_logfile_name(cls, key, linkname, job_config): """Hook to inster the name of a logfile into the input config """ logfile = job_config.get('logfile', "%s_%s_%s.log" % (cls.default_prefix_logfile, linkname, key)) job_config['logfile'] = logfile
python
def _make_scatter_logfile_name(cls, key, linkname, job_config): """Hook to inster the name of a logfile into the input config """ logfile = job_config.get('logfile', "%s_%s_%s.log" % (cls.default_prefix_logfile, linkname, key)) job_config['logfile'] = logfile
[ "def", "_make_scatter_logfile_name", "(", "cls", ",", "key", ",", "linkname", ",", "job_config", ")", ":", "logfile", "=", "job_config", ".", "get", "(", "'logfile'", ",", "\"%s_%s_%s.log\"", "%", "(", "cls", ".", "default_prefix_logfile", ",", "linkname", ","...
Hook to inster the name of a logfile into the input config
[ "Hook", "to", "inster", "the", "name", "of", "a", "logfile", "into", "the", "input", "config" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/scatter_gather.py#L97-L101
train
36,325
fermiPy/fermipy
fermipy/jobs/scatter_gather.py
ScatterGather.create
def create(cls, **kwargs): """Build and return a `ScatterGather` object """ linkname = kwargs.setdefault('linkname', cls.clientclass.linkname_default) # Don't use setdefault b/c we don't want to build a JobArchive # Unless it is needed job_archive = kwargs.get('job_archive', None) if job_archive is None: job_archive = JobArchive.build_temp_job_archive() kwargs.setdefault('job_archive', job_archive) kwargs_client = dict(linkname=linkname, link_prefix=kwargs.get('link_prefix', ''), file_stage=kwargs.get('file_stage', None), job_archive=job_archive) link = cls.clientclass.create(**kwargs_client) sg = cls(link, **kwargs) return sg
python
def create(cls, **kwargs): """Build and return a `ScatterGather` object """ linkname = kwargs.setdefault('linkname', cls.clientclass.linkname_default) # Don't use setdefault b/c we don't want to build a JobArchive # Unless it is needed job_archive = kwargs.get('job_archive', None) if job_archive is None: job_archive = JobArchive.build_temp_job_archive() kwargs.setdefault('job_archive', job_archive) kwargs_client = dict(linkname=linkname, link_prefix=kwargs.get('link_prefix', ''), file_stage=kwargs.get('file_stage', None), job_archive=job_archive) link = cls.clientclass.create(**kwargs_client) sg = cls(link, **kwargs) return sg
[ "def", "create", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "linkname", "=", "kwargs", ".", "setdefault", "(", "'linkname'", ",", "cls", ".", "clientclass", ".", "linkname_default", ")", "# Don't use setdefault b/c we don't want to build a JobArchive", "# Unless...
Build and return a `ScatterGather` object
[ "Build", "and", "return", "a", "ScatterGather", "object" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/scatter_gather.py#L104-L119
train
36,326
fermiPy/fermipy
fermipy/jobs/scatter_gather.py
ScatterGather._check_link_completion
def _check_link_completion(self, link, fail_pending=False, fail_running=False): """Internal function to check the completion of all the dispatched jobs Returns ------- status_vect : `JobStatusVector` Vector that summarize the number of jobs in various states. """ status_vect = JobStatusVector() for job_key, job_details in link.jobs.items(): # if job_details.status == JobStatus.failed: # failed = True # continue # elif job_details.status == JobStatus.done: # continue if job_key.find(JobDetails.topkey) >= 0: continue job_details.status = self._interface.check_job(job_details) if job_details.status == JobStatus.pending: if fail_pending: job_details.status = JobStatus.failed elif job_details.status == JobStatus.running: if fail_running: job_details.status = JobStatus.failed status_vect[job_details.status] += 1 link.jobs[job_key] = job_details link._set_status_self(job_details.jobkey, job_details.status) return status_vect
python
def _check_link_completion(self, link, fail_pending=False, fail_running=False): """Internal function to check the completion of all the dispatched jobs Returns ------- status_vect : `JobStatusVector` Vector that summarize the number of jobs in various states. """ status_vect = JobStatusVector() for job_key, job_details in link.jobs.items(): # if job_details.status == JobStatus.failed: # failed = True # continue # elif job_details.status == JobStatus.done: # continue if job_key.find(JobDetails.topkey) >= 0: continue job_details.status = self._interface.check_job(job_details) if job_details.status == JobStatus.pending: if fail_pending: job_details.status = JobStatus.failed elif job_details.status == JobStatus.running: if fail_running: job_details.status = JobStatus.failed status_vect[job_details.status] += 1 link.jobs[job_key] = job_details link._set_status_self(job_details.jobkey, job_details.status) return status_vect
[ "def", "_check_link_completion", "(", "self", ",", "link", ",", "fail_pending", "=", "False", ",", "fail_running", "=", "False", ")", ":", "status_vect", "=", "JobStatusVector", "(", ")", "for", "job_key", ",", "job_details", "in", "link", ".", "jobs", ".", ...
Internal function to check the completion of all the dispatched jobs Returns ------- status_vect : `JobStatusVector` Vector that summarize the number of jobs in various states.
[ "Internal", "function", "to", "check", "the", "completion", "of", "all", "the", "dispatched", "jobs" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/scatter_gather.py#L147-L177
train
36,327
fermiPy/fermipy
fermipy/jobs/scatter_gather.py
ScatterGather._build_job_dict
def _build_job_dict(self): """Build a dictionary of `JobDetails` objects for the internal `Link`""" if self.args['dry_run']: status = JobStatus.unknown else: status = JobStatus.not_ready base_config = self.scatter_link.args for jobkey, job_config in sorted(self._job_configs.items()): full_job_config = base_config.copy() full_job_config.update(job_config) ScatterGather._make_scatter_logfile_name(jobkey, self.linkname, full_job_config) logfile = job_config.get('logfile') self._scatter_link._register_job(key=jobkey, job_config=full_job_config, logfile=logfile, status=status)
python
def _build_job_dict(self): """Build a dictionary of `JobDetails` objects for the internal `Link`""" if self.args['dry_run']: status = JobStatus.unknown else: status = JobStatus.not_ready base_config = self.scatter_link.args for jobkey, job_config in sorted(self._job_configs.items()): full_job_config = base_config.copy() full_job_config.update(job_config) ScatterGather._make_scatter_logfile_name(jobkey, self.linkname, full_job_config) logfile = job_config.get('logfile') self._scatter_link._register_job(key=jobkey, job_config=full_job_config, logfile=logfile, status=status)
[ "def", "_build_job_dict", "(", "self", ")", ":", "if", "self", ".", "args", "[", "'dry_run'", "]", ":", "status", "=", "JobStatus", ".", "unknown", "else", ":", "status", "=", "JobStatus", ".", "not_ready", "base_config", "=", "self", ".", "scatter_link", ...
Build a dictionary of `JobDetails` objects for the internal `Link`
[ "Build", "a", "dictionary", "of", "JobDetails", "objects", "for", "the", "internal", "Link" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/scatter_gather.py#L179-L197
train
36,328
fermiPy/fermipy
fermipy/jobs/scatter_gather.py
ScatterGather._invoke
def _invoke(self, argv, stream=sys.stdout, resubmit_failed=False): """Invoke this object to preform a particular action Parameters ---------- argv : list List of command line arguments, passed to helper classes stream : `file` Stream that this function will print to, must have 'write' function. resubmit_failed : bool Resubmit failed jobs. Returns ------- status_vect : `JobStatusVector` Vector that summarize the number of jobs in various states. """ args = self._run_argparser(argv) if args.action not in ACTIONS: sys.stderr.write( "Unrecognized action %s, options are %s\n" % (args.action, ACTIONS)) if args.action == 'skip': return JobStatus.no_job elif args.action in ['run', 'resubmit', 'check_status', 'config']: self._job_configs = self.build_job_configs(args.__dict__) self._interface._dry_run = args.dry_run if args.action == 'run': status_vect = self.run_jobs(stream, resubmit_failed=resubmit_failed) elif args.action == 'resubmit': status_vect = self.resubmit(stream, resubmit_failed=resubmit_failed) elif args.action == 'check_status': self._build_job_dict() status_vect = self.check_status(stream) elif args.action == 'config': self._build_job_dict() status_vect = JobStatusVector() status_vect[JobStatus.done] += 1 return status_vect
python
def _invoke(self, argv, stream=sys.stdout, resubmit_failed=False): """Invoke this object to preform a particular action Parameters ---------- argv : list List of command line arguments, passed to helper classes stream : `file` Stream that this function will print to, must have 'write' function. resubmit_failed : bool Resubmit failed jobs. Returns ------- status_vect : `JobStatusVector` Vector that summarize the number of jobs in various states. """ args = self._run_argparser(argv) if args.action not in ACTIONS: sys.stderr.write( "Unrecognized action %s, options are %s\n" % (args.action, ACTIONS)) if args.action == 'skip': return JobStatus.no_job elif args.action in ['run', 'resubmit', 'check_status', 'config']: self._job_configs = self.build_job_configs(args.__dict__) self._interface._dry_run = args.dry_run if args.action == 'run': status_vect = self.run_jobs(stream, resubmit_failed=resubmit_failed) elif args.action == 'resubmit': status_vect = self.resubmit(stream, resubmit_failed=resubmit_failed) elif args.action == 'check_status': self._build_job_dict() status_vect = self.check_status(stream) elif args.action == 'config': self._build_job_dict() status_vect = JobStatusVector() status_vect[JobStatus.done] += 1 return status_vect
[ "def", "_invoke", "(", "self", ",", "argv", ",", "stream", "=", "sys", ".", "stdout", ",", "resubmit_failed", "=", "False", ")", ":", "args", "=", "self", ".", "_run_argparser", "(", "argv", ")", "if", "args", ".", "action", "not", "in", "ACTIONS", "...
Invoke this object to preform a particular action Parameters ---------- argv : list List of command line arguments, passed to helper classes stream : `file` Stream that this function will print to, must have 'write' function. resubmit_failed : bool Resubmit failed jobs. Returns ------- status_vect : `JobStatusVector` Vector that summarize the number of jobs in various states.
[ "Invoke", "this", "object", "to", "preform", "a", "particular", "action" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/scatter_gather.py#L230-L277
train
36,329
fermiPy/fermipy
fermipy/jobs/scatter_gather.py
ScatterGather.update_args
def update_args(self, override_args): """Update the arguments used to invoke the application Note that this will also update the dictionary of input and output files Parameters ---------- override_args : dict dictionary of arguments to override the current values """ self.args = extract_arguments(override_args, self.args) self._job_configs = self.build_job_configs(self.args) if not self._scatter_link.jobs: self._build_job_dict() self._latch_file_info()
python
def update_args(self, override_args): """Update the arguments used to invoke the application Note that this will also update the dictionary of input and output files Parameters ---------- override_args : dict dictionary of arguments to override the current values """ self.args = extract_arguments(override_args, self.args) self._job_configs = self.build_job_configs(self.args) if not self._scatter_link.jobs: self._build_job_dict() self._latch_file_info()
[ "def", "update_args", "(", "self", ",", "override_args", ")", ":", "self", ".", "args", "=", "extract_arguments", "(", "override_args", ",", "self", ".", "args", ")", "self", ".", "_job_configs", "=", "self", ".", "build_job_configs", "(", "self", ".", "ar...
Update the arguments used to invoke the application Note that this will also update the dictionary of input and output files Parameters ---------- override_args : dict dictionary of arguments to override the current values
[ "Update", "the", "arguments", "used", "to", "invoke", "the", "application" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/scatter_gather.py#L279-L294
train
36,330
fermiPy/fermipy
fermipy/jobs/scatter_gather.py
ScatterGather.clear_jobs
def clear_jobs(self, recursive=True): """Clear the self.jobs dictionary that contains information about jobs associated with this `ScatterGather` If recursive is True this will include jobs from all internal `Link` """ if recursive: self._scatter_link.clear_jobs(recursive) self.jobs.clear()
python
def clear_jobs(self, recursive=True): """Clear the self.jobs dictionary that contains information about jobs associated with this `ScatterGather` If recursive is True this will include jobs from all internal `Link` """ if recursive: self._scatter_link.clear_jobs(recursive) self.jobs.clear()
[ "def", "clear_jobs", "(", "self", ",", "recursive", "=", "True", ")", ":", "if", "recursive", ":", "self", ".", "_scatter_link", ".", "clear_jobs", "(", "recursive", ")", "self", ".", "jobs", ".", "clear", "(", ")" ]
Clear the self.jobs dictionary that contains information about jobs associated with this `ScatterGather` If recursive is True this will include jobs from all internal `Link`
[ "Clear", "the", "self", ".", "jobs", "dictionary", "that", "contains", "information", "about", "jobs", "associated", "with", "this", "ScatterGather" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/scatter_gather.py#L296-L304
train
36,331
fermiPy/fermipy
fermipy/jobs/scatter_gather.py
ScatterGather.check_status
def check_status(self, stream=sys.stdout, check_once=False, fail_pending=False, fail_running=False, no_wait=False, do_print=True, write_status=False): """Loop to check on the status of all the jobs in job dict. Parameters ----------- stream : `file` Stream that this function will print to, Must have 'write' function. check_once : bool Check status once and exit loop. fail_pending : `bool` If True, consider pending jobs as failed fail_running : `bool` If True, consider running jobs as failed no_wait : bool Do not sleep before checking jobs. do_print : bool Print summary stats. write_status : bool Write the status the to log file. Returns ------- status_vect : `JobStatusVector` Vector that summarize the number of jobs in various states. """ running = True first = True if not check_once: if stream != sys.stdout: sys.stdout.write('Checking status (%is): ' % self.args['job_check_sleep']) sys.stdout.flush() status_vect = JobStatusVector() while running: if first: first = False elif self.args['dry_run']: break elif no_wait: pass else: stream.write("Sleeping %.0f seconds between status checks\n" % self.args['job_check_sleep']) if stream != sys.stdout: sys.stdout.write('.') sys.stdout.flush() time.sleep(self.args['job_check_sleep']) status_vect = self._check_link_completion(self._scatter_link, fail_pending, fail_running) if self.args['check_status_once'] or check_once or no_wait: if do_print: self.print_update(stream, status_vect) break if self.args['print_update']: if do_print: self.print_update(stream, status_vect) if self._job_archive is not None: self._job_archive.write_table_file() n_total = status_vect.n_total n_done = status_vect.n_done n_failed = status_vect.n_failed if n_done + n_failed == n_total: running = False status = status_vect.get_status() if status in [JobStatus.failed, JobStatus.partial_failed]: if do_print: self.print_update(stream, status_vect) self.print_failed(stream) if write_status: self._write_status_to_log(status, stream) else: if write_status: 
self._write_status_to_log(0, stream) self._set_status_self(status=status) if not check_once: if stream != sys.stdout: sys.stdout.write("! %s\n" % (JOB_STATUS_STRINGS[status])) if self._job_archive is not None: self._job_archive.write_table_file() return status_vect
python
def check_status(self, stream=sys.stdout, check_once=False, fail_pending=False, fail_running=False, no_wait=False, do_print=True, write_status=False): """Loop to check on the status of all the jobs in job dict. Parameters ----------- stream : `file` Stream that this function will print to, Must have 'write' function. check_once : bool Check status once and exit loop. fail_pending : `bool` If True, consider pending jobs as failed fail_running : `bool` If True, consider running jobs as failed no_wait : bool Do not sleep before checking jobs. do_print : bool Print summary stats. write_status : bool Write the status the to log file. Returns ------- status_vect : `JobStatusVector` Vector that summarize the number of jobs in various states. """ running = True first = True if not check_once: if stream != sys.stdout: sys.stdout.write('Checking status (%is): ' % self.args['job_check_sleep']) sys.stdout.flush() status_vect = JobStatusVector() while running: if first: first = False elif self.args['dry_run']: break elif no_wait: pass else: stream.write("Sleeping %.0f seconds between status checks\n" % self.args['job_check_sleep']) if stream != sys.stdout: sys.stdout.write('.') sys.stdout.flush() time.sleep(self.args['job_check_sleep']) status_vect = self._check_link_completion(self._scatter_link, fail_pending, fail_running) if self.args['check_status_once'] or check_once or no_wait: if do_print: self.print_update(stream, status_vect) break if self.args['print_update']: if do_print: self.print_update(stream, status_vect) if self._job_archive is not None: self._job_archive.write_table_file() n_total = status_vect.n_total n_done = status_vect.n_done n_failed = status_vect.n_failed if n_done + n_failed == n_total: running = False status = status_vect.get_status() if status in [JobStatus.failed, JobStatus.partial_failed]: if do_print: self.print_update(stream, status_vect) self.print_failed(stream) if write_status: self._write_status_to_log(status, stream) else: if write_status: 
self._write_status_to_log(0, stream) self._set_status_self(status=status) if not check_once: if stream != sys.stdout: sys.stdout.write("! %s\n" % (JOB_STATUS_STRINGS[status])) if self._job_archive is not None: self._job_archive.write_table_file() return status_vect
[ "def", "check_status", "(", "self", ",", "stream", "=", "sys", ".", "stdout", ",", "check_once", "=", "False", ",", "fail_pending", "=", "False", ",", "fail_running", "=", "False", ",", "no_wait", "=", "False", ",", "do_print", "=", "True", ",", "write_s...
Loop to check on the status of all the jobs in job dict. Parameters ----------- stream : `file` Stream that this function will print to, Must have 'write' function. check_once : bool Check status once and exit loop. fail_pending : `bool` If True, consider pending jobs as failed fail_running : `bool` If True, consider running jobs as failed no_wait : bool Do not sleep before checking jobs. do_print : bool Print summary stats. write_status : bool Write the status the to log file. Returns ------- status_vect : `JobStatusVector` Vector that summarize the number of jobs in various states.
[ "Loop", "to", "check", "on", "the", "status", "of", "all", "the", "jobs", "in", "job", "dict", "." ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/scatter_gather.py#L318-L418
train
36,332
fermiPy/fermipy
fermipy/jobs/scatter_gather.py
ScatterGather.run_jobs
def run_jobs(self, stream=sys.stdout, resubmit_failed=False): """Function to dipatch jobs and collect results Parameters ----------- stream : `file` Stream that this function will print to, Must have 'write' function. resubmit_failed : bool Resubmit failed jobs. Returns ------- status_vect : `JobStatusVector` Vector that summarize the number of jobs in various states. """ self._build_job_dict() self._interface._dry_run = self.args['dry_run'] scatter_status = self._interface.submit_jobs(self.scatter_link, job_archive=self._job_archive, stream=stream) if scatter_status == JobStatus.failed: return JobStatus.failed status_vect = self.check_status(stream, write_status=True) status = status_vect.get_status() if status == JobStatus.partial_failed: if resubmit_failed: sys.write("Resubmitting partially failed link %s\n" % self.full_linkname) status_vect = self.resubmit(stream=stream, fail_running=False, resubmit_failed=True) else: sys.stdout.write("NOT resubmitting partially failed link %s\n" % self.full_linkname) return status_vect
python
def run_jobs(self, stream=sys.stdout, resubmit_failed=False): """Function to dipatch jobs and collect results Parameters ----------- stream : `file` Stream that this function will print to, Must have 'write' function. resubmit_failed : bool Resubmit failed jobs. Returns ------- status_vect : `JobStatusVector` Vector that summarize the number of jobs in various states. """ self._build_job_dict() self._interface._dry_run = self.args['dry_run'] scatter_status = self._interface.submit_jobs(self.scatter_link, job_archive=self._job_archive, stream=stream) if scatter_status == JobStatus.failed: return JobStatus.failed status_vect = self.check_status(stream, write_status=True) status = status_vect.get_status() if status == JobStatus.partial_failed: if resubmit_failed: sys.write("Resubmitting partially failed link %s\n" % self.full_linkname) status_vect = self.resubmit(stream=stream, fail_running=False, resubmit_failed=True) else: sys.stdout.write("NOT resubmitting partially failed link %s\n" % self.full_linkname) return status_vect
[ "def", "run_jobs", "(", "self", ",", "stream", "=", "sys", ".", "stdout", ",", "resubmit_failed", "=", "False", ")", ":", "self", ".", "_build_job_dict", "(", ")", "self", ".", "_interface", ".", "_dry_run", "=", "self", ".", "args", "[", "'dry_run'", ...
Function to dipatch jobs and collect results Parameters ----------- stream : `file` Stream that this function will print to, Must have 'write' function. resubmit_failed : bool Resubmit failed jobs. Returns ------- status_vect : `JobStatusVector` Vector that summarize the number of jobs in various states.
[ "Function", "to", "dipatch", "jobs", "and", "collect", "results" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/scatter_gather.py#L420-L457
train
36,333
fermiPy/fermipy
fermipy/jobs/scatter_gather.py
ScatterGather.resubmit
def resubmit(self, stream=sys.stdout, fail_running=False, resubmit_failed=False): """Function to resubmit failed jobs and collect results Parameters ----------- stream : `file` Stream that this function will print to, Must have 'write' function. fail_running : `bool` If True, consider running jobs as failed resubmit_failed : bool Resubmit failed jobs. Returns ------- status_vect : `JobStatusVector` Vector that summarize the number of jobs in various states. """ self._build_job_dict() status_vect = self.check_status(stream, check_once=True, fail_pending=True, fail_running=fail_running) status = status_vect.get_status() if status == JobStatus.done: return status failed_jobs = self._scatter_link.get_failed_jobs(True, True) if failed_jobs: scatter_status = self._interface.submit_jobs(self._scatter_link, failed_jobs, job_archive=self._job_archive, stream=stream) if scatter_status == JobStatus.failed: return JobStatus.failed status_vect = self.check_status(stream, write_status=True) status = status_vect.get_status() if status == JobStatus.partial_failed: if resubmit_failed: sys.stdout.write("Resubmitting partially failed link %s\n" % self.full_linkname) status_vect = self.resubmit(stream=stream, fail_running=False, resubmit_failed=False) else: sys.stdout.write("NOT resubmitting partially failed link %s\n" % self.full_linkname) if self.args['dry_run']: return JobStatus.unknown return status_vect
python
def resubmit(self, stream=sys.stdout, fail_running=False, resubmit_failed=False): """Function to resubmit failed jobs and collect results Parameters ----------- stream : `file` Stream that this function will print to, Must have 'write' function. fail_running : `bool` If True, consider running jobs as failed resubmit_failed : bool Resubmit failed jobs. Returns ------- status_vect : `JobStatusVector` Vector that summarize the number of jobs in various states. """ self._build_job_dict() status_vect = self.check_status(stream, check_once=True, fail_pending=True, fail_running=fail_running) status = status_vect.get_status() if status == JobStatus.done: return status failed_jobs = self._scatter_link.get_failed_jobs(True, True) if failed_jobs: scatter_status = self._interface.submit_jobs(self._scatter_link, failed_jobs, job_archive=self._job_archive, stream=stream) if scatter_status == JobStatus.failed: return JobStatus.failed status_vect = self.check_status(stream, write_status=True) status = status_vect.get_status() if status == JobStatus.partial_failed: if resubmit_failed: sys.stdout.write("Resubmitting partially failed link %s\n" % self.full_linkname) status_vect = self.resubmit(stream=stream, fail_running=False, resubmit_failed=False) else: sys.stdout.write("NOT resubmitting partially failed link %s\n" % self.full_linkname) if self.args['dry_run']: return JobStatus.unknown return status_vect
[ "def", "resubmit", "(", "self", ",", "stream", "=", "sys", ".", "stdout", ",", "fail_running", "=", "False", ",", "resubmit_failed", "=", "False", ")", ":", "self", ".", "_build_job_dict", "(", ")", "status_vect", "=", "self", ".", "check_status", "(", "...
Function to resubmit failed jobs and collect results Parameters ----------- stream : `file` Stream that this function will print to, Must have 'write' function. fail_running : `bool` If True, consider running jobs as failed resubmit_failed : bool Resubmit failed jobs. Returns ------- status_vect : `JobStatusVector` Vector that summarize the number of jobs in various states.
[ "Function", "to", "resubmit", "failed", "jobs", "and", "collect", "results" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/scatter_gather.py#L459-L509
train
36,334
fermiPy/fermipy
fermipy/jobs/scatter_gather.py
ScatterGather.clean_jobs
def clean_jobs(self, recursive=False): """Clean up all the jobs associated with this object. If recursive is True this also clean jobs dispatch by this object.""" self._interface.clean_jobs(self.scatter_link, clean_all=recursive)
python
def clean_jobs(self, recursive=False): """Clean up all the jobs associated with this object. If recursive is True this also clean jobs dispatch by this object.""" self._interface.clean_jobs(self.scatter_link, clean_all=recursive)
[ "def", "clean_jobs", "(", "self", ",", "recursive", "=", "False", ")", ":", "self", ".", "_interface", ".", "clean_jobs", "(", "self", ".", "scatter_link", ",", "clean_all", "=", "recursive", ")" ]
Clean up all the jobs associated with this object. If recursive is True this also clean jobs dispatch by this object.
[ "Clean", "up", "all", "the", "jobs", "associated", "with", "this", "object", "." ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/scatter_gather.py#L511-L517
train
36,335
fermiPy/fermipy
fermipy/jobs/scatter_gather.py
ScatterGather.print_update
def print_update(self, stream=sys.stdout, job_stats=None): """Print an update about the current number of jobs running """ if job_stats is None: job_stats = JobStatusVector() job_det_list = [] job_det_list += self._scatter_link.jobs.values() for job_dets in job_det_list: if job_dets.status == JobStatus.no_job: continue job_stats[job_dets.status] += 1 stream.write("Status :\n Total : %i\n Unknown: %i\n" % (job_stats.n_total, job_stats[JobStatus.unknown])) stream.write(" Not Ready: %i\n Ready: %i\n" % (job_stats[JobStatus.not_ready], job_stats[JobStatus.ready])) stream.write(" Pending: %i\n Running: %i\n" % (job_stats[JobStatus.pending], job_stats[JobStatus.running])) stream.write(" Done: %i\n Failed: %i\n" % (job_stats[JobStatus.done], job_stats[JobStatus.failed]))
python
def print_update(self, stream=sys.stdout, job_stats=None): """Print an update about the current number of jobs running """ if job_stats is None: job_stats = JobStatusVector() job_det_list = [] job_det_list += self._scatter_link.jobs.values() for job_dets in job_det_list: if job_dets.status == JobStatus.no_job: continue job_stats[job_dets.status] += 1 stream.write("Status :\n Total : %i\n Unknown: %i\n" % (job_stats.n_total, job_stats[JobStatus.unknown])) stream.write(" Not Ready: %i\n Ready: %i\n" % (job_stats[JobStatus.not_ready], job_stats[JobStatus.ready])) stream.write(" Pending: %i\n Running: %i\n" % (job_stats[JobStatus.pending], job_stats[JobStatus.running])) stream.write(" Done: %i\n Failed: %i\n" % (job_stats[JobStatus.done], job_stats[JobStatus.failed]))
[ "def", "print_update", "(", "self", ",", "stream", "=", "sys", ".", "stdout", ",", "job_stats", "=", "None", ")", ":", "if", "job_stats", "is", "None", ":", "job_stats", "=", "JobStatusVector", "(", ")", "job_det_list", "=", "[", "]", "job_det_list", "+=...
Print an update about the current number of jobs running
[ "Print", "an", "update", "about", "the", "current", "number", "of", "jobs", "running" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/scatter_gather.py#L566-L585
train
36,336
fermiPy/fermipy
fermipy/jobs/scatter_gather.py
ScatterGather.print_failed
def print_failed(self, stream=sys.stderr): """Print list of the failed jobs """ for job_key, job_details in sorted(self.scatter_link.jobs.items()): if job_details.status == JobStatus.failed: stream.write("Failed job %s\n log = %s\n" % (job_key, job_details.logfile))
python
def print_failed(self, stream=sys.stderr): """Print list of the failed jobs """ for job_key, job_details in sorted(self.scatter_link.jobs.items()): if job_details.status == JobStatus.failed: stream.write("Failed job %s\n log = %s\n" % (job_key, job_details.logfile))
[ "def", "print_failed", "(", "self", ",", "stream", "=", "sys", ".", "stderr", ")", ":", "for", "job_key", ",", "job_details", "in", "sorted", "(", "self", ".", "scatter_link", ".", "jobs", ".", "items", "(", ")", ")", ":", "if", "job_details", ".", "...
Print list of the failed jobs
[ "Print", "list", "of", "the", "failed", "jobs" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/scatter_gather.py#L587-L592
train
36,337
fermiPy/fermipy
fermipy/scripts/collect_sources.py
read_sources_from_numpy_file
def read_sources_from_numpy_file(npfile): """ Open a numpy pickle file and read all the new sources into a dictionary Parameters ---------- npfile : file name The input numpy pickle file Returns ------- tab : `~astropy.table.Table` """ srcs = np.load(npfile).flat[0]['sources'] roi = ROIModel() roi.load_sources(srcs.values()) return roi.create_table()
python
def read_sources_from_numpy_file(npfile): """ Open a numpy pickle file and read all the new sources into a dictionary Parameters ---------- npfile : file name The input numpy pickle file Returns ------- tab : `~astropy.table.Table` """ srcs = np.load(npfile).flat[0]['sources'] roi = ROIModel() roi.load_sources(srcs.values()) return roi.create_table()
[ "def", "read_sources_from_numpy_file", "(", "npfile", ")", ":", "srcs", "=", "np", ".", "load", "(", "npfile", ")", ".", "flat", "[", "0", "]", "[", "'sources'", "]", "roi", "=", "ROIModel", "(", ")", "roi", ".", "load_sources", "(", "srcs", ".", "va...
Open a numpy pickle file and read all the new sources into a dictionary Parameters ---------- npfile : file name The input numpy pickle file Returns ------- tab : `~astropy.table.Table`
[ "Open", "a", "numpy", "pickle", "file", "and", "read", "all", "the", "new", "sources", "into", "a", "dictionary" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/scripts/collect_sources.py#L11-L27
train
36,338
fermiPy/fermipy
fermipy/scripts/collect_sources.py
read_sources_from_yaml_file
def read_sources_from_yaml_file(yamlfile): """ Open a yaml file and read all the new sources into a dictionary Parameters ---------- yaml : file name The input yaml file Returns ------- tab : `~astropy.table.Table` """ f = open(yamlfile) dd = yaml.load(f) srcs = dd['sources'] f.close() roi = ROIModel() roi.load_sources(srcs.values()) return roi.create_table()
python
def read_sources_from_yaml_file(yamlfile): """ Open a yaml file and read all the new sources into a dictionary Parameters ---------- yaml : file name The input yaml file Returns ------- tab : `~astropy.table.Table` """ f = open(yamlfile) dd = yaml.load(f) srcs = dd['sources'] f.close() roi = ROIModel() roi.load_sources(srcs.values()) return roi.create_table()
[ "def", "read_sources_from_yaml_file", "(", "yamlfile", ")", ":", "f", "=", "open", "(", "yamlfile", ")", "dd", "=", "yaml", ".", "load", "(", "f", ")", "srcs", "=", "dd", "[", "'sources'", "]", "f", ".", "close", "(", ")", "roi", "=", "ROIModel", "...
Open a yaml file and read all the new sources into a dictionary Parameters ---------- yaml : file name The input yaml file Returns ------- tab : `~astropy.table.Table`
[ "Open", "a", "yaml", "file", "and", "read", "all", "the", "new", "sources", "into", "a", "dictionary" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/scripts/collect_sources.py#L30-L48
train
36,339
fermiPy/fermipy
fermipy/scripts/collect_sources.py
merge_source_tables
def merge_source_tables(src_tab, tab, all_sources=False, prefix="", suffix="", roi_idx=None): """Append the sources in a table into another table. Parameters ---------- src_tab : `~astropy.table.Table` Master source table that will be appended with the sources in ``tab``. tab : `~astropy.table.Table` Table to be merged into ``src_tab``. all_sources : bool If true, then all the sources get added to the table. if false, then only the sources that start with 'PS' get added prefix : str Prepended to all source names suffix : str Appended to all source names Returns ------- tab : `~astropy.table.Table` """ if roi_idx is not None and 'roi' not in tab.columns: tab.add_column(Column(name='roi', data=len(tab) * [roi_idx])) remove_rows = [] for i, row in enumerate(tab): if not all_sources and row['name'].find("PS") != 0: remove_rows += [i] continue sname = "%s%s%s" % (prefix, row['name'], suffix) row['name'] = sname tab.remove_rows(remove_rows) if src_tab is None: src_tab = tab else: src_tab = vstack([src_tab, tab], join_type='outer') return src_tab
python
def merge_source_tables(src_tab, tab, all_sources=False, prefix="", suffix="", roi_idx=None): """Append the sources in a table into another table. Parameters ---------- src_tab : `~astropy.table.Table` Master source table that will be appended with the sources in ``tab``. tab : `~astropy.table.Table` Table to be merged into ``src_tab``. all_sources : bool If true, then all the sources get added to the table. if false, then only the sources that start with 'PS' get added prefix : str Prepended to all source names suffix : str Appended to all source names Returns ------- tab : `~astropy.table.Table` """ if roi_idx is not None and 'roi' not in tab.columns: tab.add_column(Column(name='roi', data=len(tab) * [roi_idx])) remove_rows = [] for i, row in enumerate(tab): if not all_sources and row['name'].find("PS") != 0: remove_rows += [i] continue sname = "%s%s%s" % (prefix, row['name'], suffix) row['name'] = sname tab.remove_rows(remove_rows) if src_tab is None: src_tab = tab else: src_tab = vstack([src_tab, tab], join_type='outer') return src_tab
[ "def", "merge_source_tables", "(", "src_tab", ",", "tab", ",", "all_sources", "=", "False", ",", "prefix", "=", "\"\"", ",", "suffix", "=", "\"\"", ",", "roi_idx", "=", "None", ")", ":", "if", "roi_idx", "is", "not", "None", "and", "'roi'", "not", "in"...
Append the sources in a table into another table. Parameters ---------- src_tab : `~astropy.table.Table` Master source table that will be appended with the sources in ``tab``. tab : `~astropy.table.Table` Table to be merged into ``src_tab``. all_sources : bool If true, then all the sources get added to the table. if false, then only the sources that start with 'PS' get added prefix : str Prepended to all source names suffix : str Appended to all source names Returns ------- tab : `~astropy.table.Table`
[ "Append", "the", "sources", "in", "a", "table", "into", "another", "table", "." ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/scripts/collect_sources.py#L51-L98
train
36,340
fermiPy/fermipy
fermipy/ltcube.py
fill_livetime_hist
def fill_livetime_hist(skydir, tab_sc, tab_gti, zmax, costh_edges): """Generate a sequence of livetime distributions at the sky positions given by ``skydir``. The output of the method are two NxM arrays containing a sequence of histograms for N sky positions and M incidence angle bins where the bin edges are defined by ``costh_edges``. This method uses the same algorithm as `gtltcube` with the exception that SC time intervals are assumed to be aligned with GTIs. Parameters ---------- skydir : `~astropy.coordinates.SkyCoord` Vector of sky directions for which livetime histograms will be accumulated. tab_sc : `~astropy.table.Table` Spacecraft table. Must contain the following columns: START, STOP, LIVETIME, RA_SCZ, DEC_SZ, RA_ZENITH, DEC_ZENITH. tab_gti : `~astropy.table.Table` Table of good time intervals (GTIs). zmax : float Zenith cut. costh_edges : `~numpy.ndarray` Incidence angle bin edges in cos(angle). Returns ------- lt : `~numpy.ndarray` Array of livetime histograms. lt_wt : `~numpy.ndarray` Array of histograms of weighted livetime (livetime x livetime fraction). 
""" if len(tab_gti) == 0: shape = (len(costh_edges) - 1, len(skydir)) return (np.zeros(shape), np.zeros(shape)) m = (tab_sc['START'] < tab_gti['STOP'][-1]) m &= (tab_sc['STOP'] > tab_gti['START'][0]) tab_sc = tab_sc[m] cos_zmax = np.cos(np.radians(zmax)) sc_t0 = np.array(tab_sc['START'].data) sc_t1 = np.array(tab_sc['STOP'].data) sc_live = np.array(tab_sc['LIVETIME'].data) sc_lfrac = sc_live / (sc_t1 - sc_t0) sc_xyz = angle_to_cartesian(np.radians(tab_sc['RA_SCZ'].data), np.radians(tab_sc['DEC_SCZ'].data)) zn_xyz = angle_to_cartesian(np.radians(tab_sc['RA_ZENITH'].data), np.radians(tab_sc['DEC_ZENITH'].data)) tab_gti_t0 = np.array(tab_gti['START'].data) tab_gti_t1 = np.array(tab_gti['STOP'].data) # Index of the closest GTI interval idx = np.digitize(sc_t0, tab_gti_t0) - 1 # start/stop time of closest GTI interval gti_t0 = np.zeros_like(sc_t0) gti_t1 = np.zeros_like(sc_t1) gti_t0[idx >= 0] = tab_gti_t0[idx[idx >= 0]] gti_t1[idx >= 0] = tab_gti_t1[idx[idx >= 0]] nbin = len(costh_edges) - 1 lt = np.zeros((nbin,) + skydir.shape) lt_wt = np.zeros((nbin,) + skydir.shape) m0 = (idx >= 0) & (sc_t0 >= gti_t0) & (sc_t1 <= gti_t1) xyz = angle_to_cartesian(skydir.ra.rad, skydir.dec.rad) for i, t in enumerate(xyz): cos_sep = utils.dot_prod(t, sc_xyz) cos_zn = utils.dot_prod(t, zn_xyz) m = m0 & (cos_zn > cos_zmax) & (cos_sep > 0.0) bins = np.digitize(cos_sep[m], bins=costh_edges) - 1 bins = np.clip(bins, 0, nbin - 1) lt[:, i] = np.bincount(bins, weights=sc_live[m], minlength=nbin) lt_wt[:, i] = np.bincount(bins, weights=sc_live[m] * sc_lfrac[m], minlength=nbin) return lt, lt_wt
python
def fill_livetime_hist(skydir, tab_sc, tab_gti, zmax, costh_edges): """Generate a sequence of livetime distributions at the sky positions given by ``skydir``. The output of the method are two NxM arrays containing a sequence of histograms for N sky positions and M incidence angle bins where the bin edges are defined by ``costh_edges``. This method uses the same algorithm as `gtltcube` with the exception that SC time intervals are assumed to be aligned with GTIs. Parameters ---------- skydir : `~astropy.coordinates.SkyCoord` Vector of sky directions for which livetime histograms will be accumulated. tab_sc : `~astropy.table.Table` Spacecraft table. Must contain the following columns: START, STOP, LIVETIME, RA_SCZ, DEC_SZ, RA_ZENITH, DEC_ZENITH. tab_gti : `~astropy.table.Table` Table of good time intervals (GTIs). zmax : float Zenith cut. costh_edges : `~numpy.ndarray` Incidence angle bin edges in cos(angle). Returns ------- lt : `~numpy.ndarray` Array of livetime histograms. lt_wt : `~numpy.ndarray` Array of histograms of weighted livetime (livetime x livetime fraction). 
""" if len(tab_gti) == 0: shape = (len(costh_edges) - 1, len(skydir)) return (np.zeros(shape), np.zeros(shape)) m = (tab_sc['START'] < tab_gti['STOP'][-1]) m &= (tab_sc['STOP'] > tab_gti['START'][0]) tab_sc = tab_sc[m] cos_zmax = np.cos(np.radians(zmax)) sc_t0 = np.array(tab_sc['START'].data) sc_t1 = np.array(tab_sc['STOP'].data) sc_live = np.array(tab_sc['LIVETIME'].data) sc_lfrac = sc_live / (sc_t1 - sc_t0) sc_xyz = angle_to_cartesian(np.radians(tab_sc['RA_SCZ'].data), np.radians(tab_sc['DEC_SCZ'].data)) zn_xyz = angle_to_cartesian(np.radians(tab_sc['RA_ZENITH'].data), np.radians(tab_sc['DEC_ZENITH'].data)) tab_gti_t0 = np.array(tab_gti['START'].data) tab_gti_t1 = np.array(tab_gti['STOP'].data) # Index of the closest GTI interval idx = np.digitize(sc_t0, tab_gti_t0) - 1 # start/stop time of closest GTI interval gti_t0 = np.zeros_like(sc_t0) gti_t1 = np.zeros_like(sc_t1) gti_t0[idx >= 0] = tab_gti_t0[idx[idx >= 0]] gti_t1[idx >= 0] = tab_gti_t1[idx[idx >= 0]] nbin = len(costh_edges) - 1 lt = np.zeros((nbin,) + skydir.shape) lt_wt = np.zeros((nbin,) + skydir.shape) m0 = (idx >= 0) & (sc_t0 >= gti_t0) & (sc_t1 <= gti_t1) xyz = angle_to_cartesian(skydir.ra.rad, skydir.dec.rad) for i, t in enumerate(xyz): cos_sep = utils.dot_prod(t, sc_xyz) cos_zn = utils.dot_prod(t, zn_xyz) m = m0 & (cos_zn > cos_zmax) & (cos_sep > 0.0) bins = np.digitize(cos_sep[m], bins=costh_edges) - 1 bins = np.clip(bins, 0, nbin - 1) lt[:, i] = np.bincount(bins, weights=sc_live[m], minlength=nbin) lt_wt[:, i] = np.bincount(bins, weights=sc_live[m] * sc_lfrac[m], minlength=nbin) return lt, lt_wt
[ "def", "fill_livetime_hist", "(", "skydir", ",", "tab_sc", ",", "tab_gti", ",", "zmax", ",", "costh_edges", ")", ":", "if", "len", "(", "tab_gti", ")", "==", "0", ":", "shape", "=", "(", "len", "(", "costh_edges", ")", "-", "1", ",", "len", "(", "s...
Generate a sequence of livetime distributions at the sky positions given by ``skydir``. The output of the method are two NxM arrays containing a sequence of histograms for N sky positions and M incidence angle bins where the bin edges are defined by ``costh_edges``. This method uses the same algorithm as `gtltcube` with the exception that SC time intervals are assumed to be aligned with GTIs. Parameters ---------- skydir : `~astropy.coordinates.SkyCoord` Vector of sky directions for which livetime histograms will be accumulated. tab_sc : `~astropy.table.Table` Spacecraft table. Must contain the following columns: START, STOP, LIVETIME, RA_SCZ, DEC_SZ, RA_ZENITH, DEC_ZENITH. tab_gti : `~astropy.table.Table` Table of good time intervals (GTIs). zmax : float Zenith cut. costh_edges : `~numpy.ndarray` Incidence angle bin edges in cos(angle). Returns ------- lt : `~numpy.ndarray` Array of livetime histograms. lt_wt : `~numpy.ndarray` Array of histograms of weighted livetime (livetime x livetime fraction).
[ "Generate", "a", "sequence", "of", "livetime", "distributions", "at", "the", "sky", "positions", "given", "by", "skydir", ".", "The", "output", "of", "the", "method", "are", "two", "NxM", "arrays", "containing", "a", "sequence", "of", "histograms", "for", "N...
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/ltcube.py#L20-L108
train
36,341
fermiPy/fermipy
fermipy/ltcube.py
LTCube.create
def create(cls, ltfile): """Create a livetime cube from a single file or list of files.""" if not re.search('\.txt?', ltfile) is None: files = np.loadtxt(ltfile, unpack=True, dtype='str') elif not isinstance(ltfile, list): files = glob.glob(ltfile) ltc = cls.create_from_fits(files[0]) for f in files[1:]: ltc.load_ltfile(f) return ltc
python
def create(cls, ltfile): """Create a livetime cube from a single file or list of files.""" if not re.search('\.txt?', ltfile) is None: files = np.loadtxt(ltfile, unpack=True, dtype='str') elif not isinstance(ltfile, list): files = glob.glob(ltfile) ltc = cls.create_from_fits(files[0]) for f in files[1:]: ltc.load_ltfile(f) return ltc
[ "def", "create", "(", "cls", ",", "ltfile", ")", ":", "if", "not", "re", ".", "search", "(", "'\\.txt?'", ",", "ltfile", ")", "is", "None", ":", "files", "=", "np", ".", "loadtxt", "(", "ltfile", ",", "unpack", "=", "True", ",", "dtype", "=", "'s...
Create a livetime cube from a single file or list of files.
[ "Create", "a", "livetime", "cube", "from", "a", "single", "file", "or", "list", "of", "files", "." ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/ltcube.py#L183-L196
train
36,342
fermiPy/fermipy
fermipy/ltcube.py
LTCube.create_empty
def create_empty(cls, tstart, tstop, fill=0.0, nside=64): """Create an empty livetime cube.""" cth_edges = np.linspace(0, 1.0, 41) domega = utils.edge_to_width(cth_edges) * 2.0 * np.pi hpx = HPX(nside, True, 'CEL', ebins=cth_edges) data = np.ones((len(cth_edges) - 1, hpx.npix)) * fill return cls(data, hpx, cth_edges, tstart=tstart, tstop=tstop)
python
def create_empty(cls, tstart, tstop, fill=0.0, nside=64): """Create an empty livetime cube.""" cth_edges = np.linspace(0, 1.0, 41) domega = utils.edge_to_width(cth_edges) * 2.0 * np.pi hpx = HPX(nside, True, 'CEL', ebins=cth_edges) data = np.ones((len(cth_edges) - 1, hpx.npix)) * fill return cls(data, hpx, cth_edges, tstart=tstart, tstop=tstop)
[ "def", "create_empty", "(", "cls", ",", "tstart", ",", "tstop", ",", "fill", "=", "0.0", ",", "nside", "=", "64", ")", ":", "cth_edges", "=", "np", ".", "linspace", "(", "0", ",", "1.0", ",", "41", ")", "domega", "=", "utils", ".", "edge_to_width",...
Create an empty livetime cube.
[ "Create", "an", "empty", "livetime", "cube", "." ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/ltcube.py#L239-L245
train
36,343
fermiPy/fermipy
fermipy/ltcube.py
LTCube.create_skydir_ltcube
def create_skydir_ltcube(self, skydir, tab_sc, tab_gti, zmax): """Create a new livetime cube by scaling this one by the observing profile ratio in the direction ``skydir``. This method can be used to generate an approximate livetime cube that is accurate in the vicinity of ``skydir``. Parameters ---------- skydir : `~astropy.coordinates.SkyCoord` tab_sc : `~astropy.table.Table` Spacecraft (FT2) table. tab_gti : `~astropy.table.Table` Table of GTIs. zmax : float Zenith angle cut. """ skydir = SkyCoord(np.array([skydir.ra.deg]), np.array([skydir.dec.deg]), unit='deg') lt, lt_wt = fill_livetime_hist(skydir, tab_sc, tab_gti, zmax, self.costh_edges) ipix = self.hpx.skydir_to_pixel(skydir) lt_scale = np.ones_like(lt) lt_wt_scale = np.ones_like(lt_wt) m = self.data[:, ipix] > 0.0 lt_scale[m] = lt[m] / self.data[:, ipix][m] lt_wt_scale[m] = lt_wt[m] / self._data_wt[:, ipix][m] data = self.data * lt_scale data_wt = self._data_wt * lt_wt_scale return LTCube(data, copy.deepcopy(self.hpx), self.costh_edges, # tstart=np.min(tab_gti_t0), # tstop=np.max(tab_gti_t1), zmax=zmax, data_wt=data_wt)
python
def create_skydir_ltcube(self, skydir, tab_sc, tab_gti, zmax): """Create a new livetime cube by scaling this one by the observing profile ratio in the direction ``skydir``. This method can be used to generate an approximate livetime cube that is accurate in the vicinity of ``skydir``. Parameters ---------- skydir : `~astropy.coordinates.SkyCoord` tab_sc : `~astropy.table.Table` Spacecraft (FT2) table. tab_gti : `~astropy.table.Table` Table of GTIs. zmax : float Zenith angle cut. """ skydir = SkyCoord(np.array([skydir.ra.deg]), np.array([skydir.dec.deg]), unit='deg') lt, lt_wt = fill_livetime_hist(skydir, tab_sc, tab_gti, zmax, self.costh_edges) ipix = self.hpx.skydir_to_pixel(skydir) lt_scale = np.ones_like(lt) lt_wt_scale = np.ones_like(lt_wt) m = self.data[:, ipix] > 0.0 lt_scale[m] = lt[m] / self.data[:, ipix][m] lt_wt_scale[m] = lt_wt[m] / self._data_wt[:, ipix][m] data = self.data * lt_scale data_wt = self._data_wt * lt_wt_scale return LTCube(data, copy.deepcopy(self.hpx), self.costh_edges, # tstart=np.min(tab_gti_t0), # tstop=np.max(tab_gti_t1), zmax=zmax, data_wt=data_wt)
[ "def", "create_skydir_ltcube", "(", "self", ",", "skydir", ",", "tab_sc", ",", "tab_gti", ",", "zmax", ")", ":", "skydir", "=", "SkyCoord", "(", "np", ".", "array", "(", "[", "skydir", ".", "ra", ".", "deg", "]", ")", ",", "np", ".", "array", "(", ...
Create a new livetime cube by scaling this one by the observing profile ratio in the direction ``skydir``. This method can be used to generate an approximate livetime cube that is accurate in the vicinity of ``skydir``. Parameters ---------- skydir : `~astropy.coordinates.SkyCoord` tab_sc : `~astropy.table.Table` Spacecraft (FT2) table. tab_gti : `~astropy.table.Table` Table of GTIs. zmax : float Zenith angle cut.
[ "Create", "a", "new", "livetime", "cube", "by", "scaling", "this", "one", "by", "the", "observing", "profile", "ratio", "in", "the", "direction", "skydir", ".", "This", "method", "can", "be", "used", "to", "generate", "an", "approximate", "livetime", "cube",...
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/ltcube.py#L341-L380
train
36,344
fermiPy/fermipy
fermipy/ltcube.py
LTCube.write
def write(self, outfile): """Write the livetime cube to a FITS file.""" hdu_pri = fits.PrimaryHDU() hdu_exp = self._create_exp_hdu(self.data) hdu_exp.name = 'EXPOSURE' hdu_exp_wt = self._create_exp_hdu(self._data_wt) hdu_exp_wt.name = 'WEIGHTED_EXPOSURE' cols = [Column(name='CTHETA_MIN', dtype='f4', data=self.costh_edges[:-1][::-1]), Column(name='CTHETA_MAX', dtype='f4', data=self.costh_edges[1:][::-1]), ] hdu_bnds = fits.table_to_hdu(Table(cols)) hdu_bnds.name = 'CTHETABOUNDS' hdu_gti = fits.table_to_hdu(self._tab_gti) hdu_gti.name = 'GTI' hdus = [hdu_pri, hdu_exp, hdu_exp_wt, hdu_bnds, hdu_gti] for hdu in hdus: hdu.header['TSTART'] = self.tstart hdu.header['TSTOP'] = self.tstop with fits.HDUList(hdus) as hdulist: hdulist.writeto(outfile, clobber=True)
python
def write(self, outfile): """Write the livetime cube to a FITS file.""" hdu_pri = fits.PrimaryHDU() hdu_exp = self._create_exp_hdu(self.data) hdu_exp.name = 'EXPOSURE' hdu_exp_wt = self._create_exp_hdu(self._data_wt) hdu_exp_wt.name = 'WEIGHTED_EXPOSURE' cols = [Column(name='CTHETA_MIN', dtype='f4', data=self.costh_edges[:-1][::-1]), Column(name='CTHETA_MAX', dtype='f4', data=self.costh_edges[1:][::-1]), ] hdu_bnds = fits.table_to_hdu(Table(cols)) hdu_bnds.name = 'CTHETABOUNDS' hdu_gti = fits.table_to_hdu(self._tab_gti) hdu_gti.name = 'GTI' hdus = [hdu_pri, hdu_exp, hdu_exp_wt, hdu_bnds, hdu_gti] for hdu in hdus: hdu.header['TSTART'] = self.tstart hdu.header['TSTOP'] = self.tstop with fits.HDUList(hdus) as hdulist: hdulist.writeto(outfile, clobber=True)
[ "def", "write", "(", "self", ",", "outfile", ")", ":", "hdu_pri", "=", "fits", ".", "PrimaryHDU", "(", ")", "hdu_exp", "=", "self", ".", "_create_exp_hdu", "(", "self", ".", "data", ")", "hdu_exp", ".", "name", "=", "'EXPOSURE'", "hdu_exp_wt", "=", "se...
Write the livetime cube to a FITS file.
[ "Write", "the", "livetime", "cube", "to", "a", "FITS", "file", "." ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/ltcube.py#L407-L435
train
36,345
fermiPy/fermipy
fermipy/plotting.py
make_cube_slice
def make_cube_slice(map_in, loge_bounds): """Extract a slice from a map cube object. """ # FIXME: This functionality should be moved into a slice method of # gammapy.maps axis = map_in.geom.axes[0] i0 = utils.val_to_edge(axis.edges, 10**loge_bounds[0])[0] i1 = utils.val_to_edge(axis.edges, 10**loge_bounds[1])[0] new_axis = map_in.geom.axes[0].slice(slice(i0, i1)) geom = map_in.geom.to_image() geom = geom.to_cube([new_axis]) map_out = WcsNDMap(geom, map_in.data[slice(i0, i1), ...].copy()) return map_out
python
def make_cube_slice(map_in, loge_bounds): """Extract a slice from a map cube object. """ # FIXME: This functionality should be moved into a slice method of # gammapy.maps axis = map_in.geom.axes[0] i0 = utils.val_to_edge(axis.edges, 10**loge_bounds[0])[0] i1 = utils.val_to_edge(axis.edges, 10**loge_bounds[1])[0] new_axis = map_in.geom.axes[0].slice(slice(i0, i1)) geom = map_in.geom.to_image() geom = geom.to_cube([new_axis]) map_out = WcsNDMap(geom, map_in.data[slice(i0, i1), ...].copy()) return map_out
[ "def", "make_cube_slice", "(", "map_in", ",", "loge_bounds", ")", ":", "# FIXME: This functionality should be moved into a slice method of", "# gammapy.maps", "axis", "=", "map_in", ".", "geom", ".", "axes", "[", "0", "]", "i0", "=", "utils", ".", "val_to_edge", "("...
Extract a slice from a map cube object.
[ "Extract", "a", "slice", "from", "a", "map", "cube", "object", "." ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/plotting.py#L343-L355
train
36,346
fermiPy/fermipy
fermipy/plotting.py
SEDPlotter.plot_sed
def plot_sed(sed, showlnl=False, **kwargs): """Render a plot of a spectral energy distribution. Parameters ---------- showlnl : bool Overlay a map of the delta-loglikelihood values vs. flux in each energy bin. cmap : str Colormap that will be used for the delta-loglikelihood map. llhcut : float Minimum delta-loglikelihood value. ul_ts_threshold : float TS threshold that determines whether the MLE or UL is plotted in each energy bin. """ ax = kwargs.pop('ax', plt.gca()) cmap = kwargs.get('cmap', 'BuGn') annotate_name(sed, ax=ax) SEDPlotter.plot_flux_points(sed, **kwargs) if np.any(sed['ts'] > 9.): if 'model_flux' in sed: SEDPlotter.plot_model(sed['model_flux'], noband=showlnl, **kwargs) if showlnl: SEDPlotter.plot_lnlscan(sed, **kwargs) ax.set_yscale('log') ax.set_xscale('log') ax.set_xlabel('Energy [MeV]') ax.set_ylabel('E$^{2}$dN/dE [MeV cm$^{-2}$ s$^{-1}$]')
python
def plot_sed(sed, showlnl=False, **kwargs): """Render a plot of a spectral energy distribution. Parameters ---------- showlnl : bool Overlay a map of the delta-loglikelihood values vs. flux in each energy bin. cmap : str Colormap that will be used for the delta-loglikelihood map. llhcut : float Minimum delta-loglikelihood value. ul_ts_threshold : float TS threshold that determines whether the MLE or UL is plotted in each energy bin. """ ax = kwargs.pop('ax', plt.gca()) cmap = kwargs.get('cmap', 'BuGn') annotate_name(sed, ax=ax) SEDPlotter.plot_flux_points(sed, **kwargs) if np.any(sed['ts'] > 9.): if 'model_flux' in sed: SEDPlotter.plot_model(sed['model_flux'], noband=showlnl, **kwargs) if showlnl: SEDPlotter.plot_lnlscan(sed, **kwargs) ax.set_yscale('log') ax.set_xscale('log') ax.set_xlabel('Energy [MeV]') ax.set_ylabel('E$^{2}$dN/dE [MeV cm$^{-2}$ s$^{-1}$]')
[ "def", "plot_sed", "(", "sed", ",", "showlnl", "=", "False", ",", "*", "*", "kwargs", ")", ":", "ax", "=", "kwargs", ".", "pop", "(", "'ax'", ",", "plt", ".", "gca", "(", ")", ")", "cmap", "=", "kwargs", ".", "get", "(", "'cmap'", ",", "'BuGn'"...
Render a plot of a spectral energy distribution. Parameters ---------- showlnl : bool Overlay a map of the delta-loglikelihood values vs. flux in each energy bin. cmap : str Colormap that will be used for the delta-loglikelihood map. llhcut : float Minimum delta-loglikelihood value. ul_ts_threshold : float TS threshold that determines whether the MLE or UL is plotted in each energy bin.
[ "Render", "a", "plot", "of", "a", "spectral", "energy", "distribution", "." ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/plotting.py#L815-L855
train
36,347
fermiPy/fermipy
fermipy/plotting.py
AnalysisPlotter.run
def run(self, gta, mcube_map, **kwargs): """Make all plots.""" prefix = kwargs.get('prefix', 'test') format = kwargs.get('format', self.config['format']) loge_bounds = [None] + self.config['loge_bounds'] for x in loge_bounds: self.make_roi_plots(gta, mcube_map, loge_bounds=x, **kwargs) imfile = utils.format_filename(self.config['fileio']['workdir'], 'counts_spectrum', prefix=[prefix], extension=format) make_counts_spectrum_plot(gta._roi_data, gta.roi, gta.log_energies, imfile, **kwargs)
python
def run(self, gta, mcube_map, **kwargs): """Make all plots.""" prefix = kwargs.get('prefix', 'test') format = kwargs.get('format', self.config['format']) loge_bounds = [None] + self.config['loge_bounds'] for x in loge_bounds: self.make_roi_plots(gta, mcube_map, loge_bounds=x, **kwargs) imfile = utils.format_filename(self.config['fileio']['workdir'], 'counts_spectrum', prefix=[prefix], extension=format) make_counts_spectrum_plot(gta._roi_data, gta.roi, gta.log_energies, imfile, **kwargs)
[ "def", "run", "(", "self", ",", "gta", ",", "mcube_map", ",", "*", "*", "kwargs", ")", ":", "prefix", "=", "kwargs", ".", "get", "(", "'prefix'", ",", "'test'", ")", "format", "=", "kwargs", ".", "get", "(", "'format'", ",", "self", ".", "config", ...
Make all plots.
[ "Make", "all", "plots", "." ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/plotting.py#L924-L941
train
36,348
fermiPy/fermipy
fermipy/plotting.py
AnalysisPlotter._plot_extension
def _plot_extension(self, gta, prefix, src, loge_bounds=None, **kwargs): """Utility function for generating diagnostic plots for the extension analysis.""" # format = kwargs.get('format', self.config['plotting']['format']) if loge_bounds is None: loge_bounds = (self.energies[0], self.energies[-1]) name = src['name'].lower().replace(' ', '_') esuffix = '_%.3f_%.3f' % (loge_bounds[0], loge_bounds[1]) p = ExtensionPlotter(src, self.roi, '', self.config['fileio']['workdir'], loge_bounds=loge_bounds) fig = plt.figure() p.plot(0) plt.gca().set_xlim(-2, 2) ROIPlotter.setup_projection_axis(0) annotate(src=src, loge_bounds=loge_bounds) plt.savefig(os.path.join(self.config['fileio']['workdir'], '%s_%s_extension_xproj%s.png' % ( prefix, name, esuffix))) plt.close(fig) fig = plt.figure() p.plot(1) plt.gca().set_xlim(-2, 2) ROIPlotter.setup_projection_axis(1) annotate(src=src, loge_bounds=loge_bounds) plt.savefig(os.path.join(self.config['fileio']['workdir'], '%s_%s_extension_yproj%s.png' % ( prefix, name, esuffix))) plt.close(fig) for i, c in enumerate(self.components): suffix = '_%02i' % i p = ExtensionPlotter(src, self.roi, suffix, self.config['fileio']['workdir'], loge_bounds=loge_bounds) fig = plt.figure() p.plot(0) ROIPlotter.setup_projection_axis(0, loge_bounds=loge_bounds) annotate(src=src, loge_bounds=loge_bounds) plt.gca().set_xlim(-2, 2) plt.savefig(os.path.join(self.config['fileio']['workdir'], '%s_%s_extension_xproj%s%s.png' % ( prefix, name, esuffix, suffix))) plt.close(fig) fig = plt.figure() p.plot(1) plt.gca().set_xlim(-2, 2) ROIPlotter.setup_projection_axis(1, loge_bounds=loge_bounds) annotate(src=src, loge_bounds=loge_bounds) plt.savefig(os.path.join(self.config['fileio']['workdir'], '%s_%s_extension_yproj%s%s.png' % ( prefix, name, esuffix, suffix))) plt.close(fig)
python
def _plot_extension(self, gta, prefix, src, loge_bounds=None, **kwargs): """Utility function for generating diagnostic plots for the extension analysis.""" # format = kwargs.get('format', self.config['plotting']['format']) if loge_bounds is None: loge_bounds = (self.energies[0], self.energies[-1]) name = src['name'].lower().replace(' ', '_') esuffix = '_%.3f_%.3f' % (loge_bounds[0], loge_bounds[1]) p = ExtensionPlotter(src, self.roi, '', self.config['fileio']['workdir'], loge_bounds=loge_bounds) fig = plt.figure() p.plot(0) plt.gca().set_xlim(-2, 2) ROIPlotter.setup_projection_axis(0) annotate(src=src, loge_bounds=loge_bounds) plt.savefig(os.path.join(self.config['fileio']['workdir'], '%s_%s_extension_xproj%s.png' % ( prefix, name, esuffix))) plt.close(fig) fig = plt.figure() p.plot(1) plt.gca().set_xlim(-2, 2) ROIPlotter.setup_projection_axis(1) annotate(src=src, loge_bounds=loge_bounds) plt.savefig(os.path.join(self.config['fileio']['workdir'], '%s_%s_extension_yproj%s.png' % ( prefix, name, esuffix))) plt.close(fig) for i, c in enumerate(self.components): suffix = '_%02i' % i p = ExtensionPlotter(src, self.roi, suffix, self.config['fileio']['workdir'], loge_bounds=loge_bounds) fig = plt.figure() p.plot(0) ROIPlotter.setup_projection_axis(0, loge_bounds=loge_bounds) annotate(src=src, loge_bounds=loge_bounds) plt.gca().set_xlim(-2, 2) plt.savefig(os.path.join(self.config['fileio']['workdir'], '%s_%s_extension_xproj%s%s.png' % ( prefix, name, esuffix, suffix))) plt.close(fig) fig = plt.figure() p.plot(1) plt.gca().set_xlim(-2, 2) ROIPlotter.setup_projection_axis(1, loge_bounds=loge_bounds) annotate(src=src, loge_bounds=loge_bounds) plt.savefig(os.path.join(self.config['fileio']['workdir'], '%s_%s_extension_yproj%s%s.png' % ( prefix, name, esuffix, suffix))) plt.close(fig)
[ "def", "_plot_extension", "(", "self", ",", "gta", ",", "prefix", ",", "src", ",", "loge_bounds", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# format = kwargs.get('format', self.config['plotting']['format'])", "if", "loge_bounds", "is", "None", ":", "loge_bo...
Utility function for generating diagnostic plots for the extension analysis.
[ "Utility", "function", "for", "generating", "diagnostic", "plots", "for", "the", "extension", "analysis", "." ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/plotting.py#L1538-L1600
train
36,349
fermiPy/fermipy
fermipy/jobs/gtlink.py
extract_parameters
def extract_parameters(pil, keys=None): """Extract and return parameter names and values from a pil object Parameters ---------- pil : `Pil` object keys : list List of parameter names, if None, extact all parameters Returns ------- out_dict : dict Dictionary with parameter name, value pairs """ out_dict = {} if keys is None: keys = pil.keys() for key in keys: try: out_dict[key] = pil[key] except ValueError: out_dict[key] = None return out_dict
python
def extract_parameters(pil, keys=None): """Extract and return parameter names and values from a pil object Parameters ---------- pil : `Pil` object keys : list List of parameter names, if None, extact all parameters Returns ------- out_dict : dict Dictionary with parameter name, value pairs """ out_dict = {} if keys is None: keys = pil.keys() for key in keys: try: out_dict[key] = pil[key] except ValueError: out_dict[key] = None return out_dict
[ "def", "extract_parameters", "(", "pil", ",", "keys", "=", "None", ")", ":", "out_dict", "=", "{", "}", "if", "keys", "is", "None", ":", "keys", "=", "pil", ".", "keys", "(", ")", "for", "key", "in", "keys", ":", "try", ":", "out_dict", "[", "key...
Extract and return parameter names and values from a pil object Parameters ---------- pil : `Pil` object keys : list List of parameter names, if None, extact all parameters Returns ------- out_dict : dict Dictionary with parameter name, value pairs
[ "Extract", "and", "return", "parameter", "names", "and", "values", "from", "a", "pil", "object" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/gtlink.py#L14-L39
train
36,350
fermiPy/fermipy
fermipy/jobs/gtlink.py
update_gtapp
def update_gtapp(gtapp, **kwargs): """Update the parameters of the object that can run ScienceTools applications Parameters ---------- gtapp : `GtApp.GtApp` Object that will run the application in question kwargs : arguments used to invoke the application """ for key, val in kwargs.items(): if key in ['pfiles', 'scratch']: continue if val is None: continue try: gtapp[key] = val except ValueError: raise ValueError( "gtapp failed to set parameter %s %s" % (key, val)) except KeyError: raise KeyError("gtapp failed to set parameter %s %s" % (key, val))
python
def update_gtapp(gtapp, **kwargs): """Update the parameters of the object that can run ScienceTools applications Parameters ---------- gtapp : `GtApp.GtApp` Object that will run the application in question kwargs : arguments used to invoke the application """ for key, val in kwargs.items(): if key in ['pfiles', 'scratch']: continue if val is None: continue try: gtapp[key] = val except ValueError: raise ValueError( "gtapp failed to set parameter %s %s" % (key, val)) except KeyError: raise KeyError("gtapp failed to set parameter %s %s" % (key, val))
[ "def", "update_gtapp", "(", "gtapp", ",", "*", "*", "kwargs", ")", ":", "for", "key", ",", "val", "in", "kwargs", ".", "items", "(", ")", ":", "if", "key", "in", "[", "'pfiles'", ",", "'scratch'", "]", ":", "continue", "if", "val", "is", "None", ...
Update the parameters of the object that can run ScienceTools applications Parameters ---------- gtapp : `GtApp.GtApp` Object that will run the application in question kwargs : arguments used to invoke the application
[ "Update", "the", "parameters", "of", "the", "object", "that", "can", "run", "ScienceTools", "applications" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/gtlink.py#L42-L64
train
36,351
fermiPy/fermipy
fermipy/jobs/gtlink.py
build_gtapp
def build_gtapp(appname, dry_run, **kwargs): """Build an object that can run ScienceTools application Parameters ---------- appname : str Name of the application (e.g., gtbin) dry_run : bool Print command but do not run it kwargs : arguments used to invoke the application Returns `GtApp.GtApp` object that will run the application in question """ pfiles_orig = _set_pfiles(dry_run, **kwargs) gtapp = GtApp.GtApp(appname) update_gtapp(gtapp, **kwargs) _reset_pfiles(pfiles_orig) return gtapp
python
def build_gtapp(appname, dry_run, **kwargs): """Build an object that can run ScienceTools application Parameters ---------- appname : str Name of the application (e.g., gtbin) dry_run : bool Print command but do not run it kwargs : arguments used to invoke the application Returns `GtApp.GtApp` object that will run the application in question """ pfiles_orig = _set_pfiles(dry_run, **kwargs) gtapp = GtApp.GtApp(appname) update_gtapp(gtapp, **kwargs) _reset_pfiles(pfiles_orig) return gtapp
[ "def", "build_gtapp", "(", "appname", ",", "dry_run", ",", "*", "*", "kwargs", ")", ":", "pfiles_orig", "=", "_set_pfiles", "(", "dry_run", ",", "*", "*", "kwargs", ")", "gtapp", "=", "GtApp", ".", "GtApp", "(", "appname", ")", "update_gtapp", "(", "gt...
Build an object that can run ScienceTools application Parameters ---------- appname : str Name of the application (e.g., gtbin) dry_run : bool Print command but do not run it kwargs : arguments used to invoke the application Returns `GtApp.GtApp` object that will run the application in question
[ "Build", "an", "object", "that", "can", "run", "ScienceTools", "application" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/gtlink.py#L116-L135
train
36,352
fermiPy/fermipy
fermipy/jobs/gtlink.py
run_gtapp
def run_gtapp(gtapp, stream, dry_run, **kwargs): """Runs one on the ScienceTools apps Taken from fermipy.gtanalysis.run_gtapp by Matt Wood Parameters ---------- gtapp : `GtApp.GtApp` object The application (e.g., gtbin) stream : stream object Must have 'write' function dry_run : bool Print command but do not run it kwargs : arguments used to invoke the application """ if stream is None: stream = sys.stdout pfiles_orig = _set_pfiles(dry_run, **kwargs) update_gtapp(gtapp, **kwargs) stream.write("%s\n" % gtapp.command()) stream.flush() if dry_run: _reset_pfiles(pfiles_orig) return 0 try: stdin, stdout = gtapp.runWithOutput(print_command=False) for line in stdout: stream.write(line.strip()) stream.flush() return_code = 0 except: stream.write('Exited with exit code -1\n') return_code = -1 _reset_pfiles(pfiles_orig) return return_code
python
def run_gtapp(gtapp, stream, dry_run, **kwargs): """Runs one on the ScienceTools apps Taken from fermipy.gtanalysis.run_gtapp by Matt Wood Parameters ---------- gtapp : `GtApp.GtApp` object The application (e.g., gtbin) stream : stream object Must have 'write' function dry_run : bool Print command but do not run it kwargs : arguments used to invoke the application """ if stream is None: stream = sys.stdout pfiles_orig = _set_pfiles(dry_run, **kwargs) update_gtapp(gtapp, **kwargs) stream.write("%s\n" % gtapp.command()) stream.flush() if dry_run: _reset_pfiles(pfiles_orig) return 0 try: stdin, stdout = gtapp.runWithOutput(print_command=False) for line in stdout: stream.write(line.strip()) stream.flush() return_code = 0 except: stream.write('Exited with exit code -1\n') return_code = -1 _reset_pfiles(pfiles_orig) return return_code
[ "def", "run_gtapp", "(", "gtapp", ",", "stream", ",", "dry_run", ",", "*", "*", "kwargs", ")", ":", "if", "stream", "is", "None", ":", "stream", "=", "sys", ".", "stdout", "pfiles_orig", "=", "_set_pfiles", "(", "dry_run", ",", "*", "*", "kwargs", ")...
Runs one on the ScienceTools apps Taken from fermipy.gtanalysis.run_gtapp by Matt Wood Parameters ---------- gtapp : `GtApp.GtApp` object The application (e.g., gtbin) stream : stream object Must have 'write' function dry_run : bool Print command but do not run it kwargs : arguments used to invoke the application
[ "Runs", "one", "on", "the", "ScienceTools", "apps" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/gtlink.py#L138-L180
train
36,353
fermiPy/fermipy
fermipy/diffuse/gt_assemble_model.py
InitModel.run_analysis
def run_analysis(self, argv): """ Build the manifest for all the models """ args = self._parser.parse_args(argv) components = Component.build_from_yamlfile(args.comp) NAME_FACTORY.update_base_dict(args.data) model_dict = make_library(**args.__dict__) model_manager = model_dict['ModelManager'] models = load_yaml(args.models) data = args.data hpx_order = args.hpx_order for modelkey in models: model_manager.make_srcmap_manifest(modelkey, components, data) model_manager.make_fermipy_config_yaml(modelkey, components, data, hpx_order=hpx_order, irf_ver=NAME_FACTORY.irf_ver())
python
def run_analysis(self, argv): """ Build the manifest for all the models """ args = self._parser.parse_args(argv) components = Component.build_from_yamlfile(args.comp) NAME_FACTORY.update_base_dict(args.data) model_dict = make_library(**args.__dict__) model_manager = model_dict['ModelManager'] models = load_yaml(args.models) data = args.data hpx_order = args.hpx_order for modelkey in models: model_manager.make_srcmap_manifest(modelkey, components, data) model_manager.make_fermipy_config_yaml(modelkey, components, data, hpx_order=hpx_order, irf_ver=NAME_FACTORY.irf_ver())
[ "def", "run_analysis", "(", "self", ",", "argv", ")", ":", "args", "=", "self", ".", "_parser", ".", "parse_args", "(", "argv", ")", "components", "=", "Component", ".", "build_from_yamlfile", "(", "args", ".", "comp", ")", "NAME_FACTORY", ".", "update_bas...
Build the manifest for all the models
[ "Build", "the", "manifest", "for", "all", "the", "models" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/gt_assemble_model.py#L46-L61
train
36,354
fermiPy/fermipy
fermipy/diffuse/gt_assemble_model.py
AssembleModel.copy_ccube
def copy_ccube(ccube, outsrcmap, hpx_order): """Copy a counts cube into outsrcmap file reducing the HEALPix order to hpx_order if needed. """ sys.stdout.write(" Copying counts cube from %s to %s\n" % (ccube, outsrcmap)) try: hdulist_in = fits.open(ccube) except IOError: hdulist_in = fits.open("%s.gz" % ccube) hpx_order_in = hdulist_in[1].header['ORDER'] if hpx_order_in > hpx_order: hpxmap = HpxMap.create_from_hdulist(hdulist_in) hpxmap_out = hpxmap.ud_grade(hpx_order, preserve_counts=True) hpxlist_out = hdulist_in #hpxlist_out['SKYMAP'] = hpxmap_out.create_image_hdu() hpxlist_out[1] = hpxmap_out.create_image_hdu() hpxlist_out[1].name = 'SKYMAP' hpxlist_out.writeto(outsrcmap) return hpx_order else: os.system('cp %s %s' % (ccube, outsrcmap)) #os.system('cp %s.gz %s.gz' % (ccube, outsrcmap)) #os.system('gunzip -f %s.gz' % (outsrcmap)) return None
python
def copy_ccube(ccube, outsrcmap, hpx_order): """Copy a counts cube into outsrcmap file reducing the HEALPix order to hpx_order if needed. """ sys.stdout.write(" Copying counts cube from %s to %s\n" % (ccube, outsrcmap)) try: hdulist_in = fits.open(ccube) except IOError: hdulist_in = fits.open("%s.gz" % ccube) hpx_order_in = hdulist_in[1].header['ORDER'] if hpx_order_in > hpx_order: hpxmap = HpxMap.create_from_hdulist(hdulist_in) hpxmap_out = hpxmap.ud_grade(hpx_order, preserve_counts=True) hpxlist_out = hdulist_in #hpxlist_out['SKYMAP'] = hpxmap_out.create_image_hdu() hpxlist_out[1] = hpxmap_out.create_image_hdu() hpxlist_out[1].name = 'SKYMAP' hpxlist_out.writeto(outsrcmap) return hpx_order else: os.system('cp %s %s' % (ccube, outsrcmap)) #os.system('cp %s.gz %s.gz' % (ccube, outsrcmap)) #os.system('gunzip -f %s.gz' % (outsrcmap)) return None
[ "def", "copy_ccube", "(", "ccube", ",", "outsrcmap", ",", "hpx_order", ")", ":", "sys", ".", "stdout", ".", "write", "(", "\" Copying counts cube from %s to %s\\n\"", "%", "(", "ccube", ",", "outsrcmap", ")", ")", "try", ":", "hdulist_in", "=", "fits", ".",...
Copy a counts cube into outsrcmap file reducing the HEALPix order to hpx_order if needed.
[ "Copy", "a", "counts", "cube", "into", "outsrcmap", "file", "reducing", "the", "HEALPix", "order", "to", "hpx_order", "if", "needed", "." ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/gt_assemble_model.py#L79-L104
train
36,355
fermiPy/fermipy
fermipy/diffuse/gt_assemble_model.py
AssembleModel.append_hdus
def append_hdus(hdulist, srcmap_file, source_names, hpx_order): """Append HEALPix maps to a list Parameters ---------- hdulist : list The list being appended to srcmap_file : str Path to the file containing the HDUs source_names : list of str Names of the sources to extract from srcmap_file hpx_order : int Maximum order for maps """ sys.stdout.write(" Extracting %i sources from %s" % (len(source_names), srcmap_file)) try: hdulist_in = fits.open(srcmap_file) except IOError: try: hdulist_in = fits.open('%s.gz' % srcmap_file) except IOError: sys.stdout.write(" Missing file %s\n" % srcmap_file) return for source_name in source_names: sys.stdout.write('.') sys.stdout.flush() if hpx_order is None: hdulist.append(hdulist_in[source_name]) else: try: hpxmap = HpxMap.create_from_hdulist(hdulist_in, hdu=source_name) except IndexError: print(" Index error on source %s in file %s" % (source_name, srcmap_file)) continue except KeyError: print(" Key error on source %s in file %s" % (source_name, srcmap_file)) continue hpxmap_out = hpxmap.ud_grade(hpx_order, preserve_counts=True) hdulist.append(hpxmap_out.create_image_hdu(name=source_name)) sys.stdout.write("\n") hdulist.flush() hdulist_in.close()
python
def append_hdus(hdulist, srcmap_file, source_names, hpx_order): """Append HEALPix maps to a list Parameters ---------- hdulist : list The list being appended to srcmap_file : str Path to the file containing the HDUs source_names : list of str Names of the sources to extract from srcmap_file hpx_order : int Maximum order for maps """ sys.stdout.write(" Extracting %i sources from %s" % (len(source_names), srcmap_file)) try: hdulist_in = fits.open(srcmap_file) except IOError: try: hdulist_in = fits.open('%s.gz' % srcmap_file) except IOError: sys.stdout.write(" Missing file %s\n" % srcmap_file) return for source_name in source_names: sys.stdout.write('.') sys.stdout.flush() if hpx_order is None: hdulist.append(hdulist_in[source_name]) else: try: hpxmap = HpxMap.create_from_hdulist(hdulist_in, hdu=source_name) except IndexError: print(" Index error on source %s in file %s" % (source_name, srcmap_file)) continue except KeyError: print(" Key error on source %s in file %s" % (source_name, srcmap_file)) continue hpxmap_out = hpxmap.ud_grade(hpx_order, preserve_counts=True) hdulist.append(hpxmap_out.create_image_hdu(name=source_name)) sys.stdout.write("\n") hdulist.flush() hdulist_in.close()
[ "def", "append_hdus", "(", "hdulist", ",", "srcmap_file", ",", "source_names", ",", "hpx_order", ")", ":", "sys", ".", "stdout", ".", "write", "(", "\" Extracting %i sources from %s\"", "%", "(", "len", "(", "source_names", ")", ",", "srcmap_file", ")", ")", ...
Append HEALPix maps to a list Parameters ---------- hdulist : list The list being appended to srcmap_file : str Path to the file containing the HDUs source_names : list of str Names of the sources to extract from srcmap_file hpx_order : int Maximum order for maps
[ "Append", "HEALPix", "maps", "to", "a", "list" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/gt_assemble_model.py#L113-L156
train
36,356
fermiPy/fermipy
fermipy/diffuse/gt_assemble_model.py
AssembleModel.assemble_component
def assemble_component(compname, compinfo, hpx_order): """Assemble the source map file for one binning component Parameters ---------- compname : str The key for this component (e.g., E0_PSF3) compinfo : dict Information about this component hpx_order : int Maximum order for maps """ sys.stdout.write("Working on component %s\n" % compname) ccube = compinfo['ccube'] outsrcmap = compinfo['outsrcmap'] source_dict = compinfo['source_dict'] hpx_order = AssembleModel.copy_ccube(ccube, outsrcmap, hpx_order) hdulist = AssembleModel.open_outsrcmap(outsrcmap) for comp_name in sorted(source_dict.keys()): source_info = source_dict[comp_name] source_names = source_info['source_names'] srcmap_file = source_info['srcmap_file'] AssembleModel.append_hdus(hdulist, srcmap_file, source_names, hpx_order) sys.stdout.write("Done!\n")
python
def assemble_component(compname, compinfo, hpx_order): """Assemble the source map file for one binning component Parameters ---------- compname : str The key for this component (e.g., E0_PSF3) compinfo : dict Information about this component hpx_order : int Maximum order for maps """ sys.stdout.write("Working on component %s\n" % compname) ccube = compinfo['ccube'] outsrcmap = compinfo['outsrcmap'] source_dict = compinfo['source_dict'] hpx_order = AssembleModel.copy_ccube(ccube, outsrcmap, hpx_order) hdulist = AssembleModel.open_outsrcmap(outsrcmap) for comp_name in sorted(source_dict.keys()): source_info = source_dict[comp_name] source_names = source_info['source_names'] srcmap_file = source_info['srcmap_file'] AssembleModel.append_hdus(hdulist, srcmap_file, source_names, hpx_order) sys.stdout.write("Done!\n")
[ "def", "assemble_component", "(", "compname", ",", "compinfo", ",", "hpx_order", ")", ":", "sys", ".", "stdout", ".", "write", "(", "\"Working on component %s\\n\"", "%", "compname", ")", "ccube", "=", "compinfo", "[", "'ccube'", "]", "outsrcmap", "=", "compin...
Assemble the source map file for one binning component Parameters ---------- compname : str The key for this component (e.g., E0_PSF3) compinfo : dict Information about this component hpx_order : int Maximum order for maps
[ "Assemble", "the", "source", "map", "file", "for", "one", "binning", "component" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/gt_assemble_model.py#L159-L187
train
36,357
fermiPy/fermipy
fermipy/diffuse/gt_assemble_model.py
AssembleModel.run_analysis
def run_analysis(self, argv): """Assemble the source map file for one binning component FIXME """ args = self._parser.parse_args(argv) manifest = yaml.safe_load(open(args.input)) compname = args.compname value = manifest[compname] self.assemble_component(compname, value, args.hpx_order)
python
def run_analysis(self, argv): """Assemble the source map file for one binning component FIXME """ args = self._parser.parse_args(argv) manifest = yaml.safe_load(open(args.input)) compname = args.compname value = manifest[compname] self.assemble_component(compname, value, args.hpx_order)
[ "def", "run_analysis", "(", "self", ",", "argv", ")", ":", "args", "=", "self", ".", "_parser", ".", "parse_args", "(", "argv", ")", "manifest", "=", "yaml", ".", "safe_load", "(", "open", "(", "args", ".", "input", ")", ")", "compname", "=", "args",...
Assemble the source map file for one binning component FIXME
[ "Assemble", "the", "source", "map", "file", "for", "one", "binning", "component", "FIXME" ]
9df5e7e3728307fd58c5bba36fd86783c39fbad4
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/gt_assemble_model.py#L189-L198
train
36,358
jim-easterbrook/pywws
src/pywws/weatherstation.py
CUSBDrive.read_block
def read_block(self, address): """Read 32 bytes from the weather station. If the read fails for any reason, :obj:`None` is returned. :param address: address to read from. :type address: int :return: the data from the weather station. :rtype: list(int) """ buf = [ self.ReadCommand, address // 256, address % 256, self.EndMark, self.ReadCommand, address // 256, address % 256, self.EndMark, ] if not self.dev.write_data(buf): return None return self.dev.read_data(32)
python
def read_block(self, address): """Read 32 bytes from the weather station. If the read fails for any reason, :obj:`None` is returned. :param address: address to read from. :type address: int :return: the data from the weather station. :rtype: list(int) """ buf = [ self.ReadCommand, address // 256, address % 256, self.EndMark, self.ReadCommand, address // 256, address % 256, self.EndMark, ] if not self.dev.write_data(buf): return None return self.dev.read_data(32)
[ "def", "read_block", "(", "self", ",", "address", ")", ":", "buf", "=", "[", "self", ".", "ReadCommand", ",", "address", "//", "256", ",", "address", "%", "256", ",", "self", ".", "EndMark", ",", "self", ".", "ReadCommand", ",", "address", "//", "256...
Read 32 bytes from the weather station. If the read fails for any reason, :obj:`None` is returned. :param address: address to read from. :type address: int :return: the data from the weather station. :rtype: list(int)
[ "Read", "32", "bytes", "from", "the", "weather", "station", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/weatherstation.py#L325-L351
train
36,359
jim-easterbrook/pywws
src/pywws/weatherstation.py
CUSBDrive.write_byte
def write_byte(self, address, data): """Write a single byte to the weather station. :param address: address to write to. :type address: int :param data: the value to write. :type data: int :return: success status. :rtype: bool """ buf = [ self.WriteCommandWord, address // 256, address % 256, self.EndMark, self.WriteCommandWord, data, 0, self.EndMark, ] if not self.dev.write_data(buf): return False buf = self.dev.read_data(8) if buf is None: return False for byte in buf: if byte != 0xA5: return False return True
python
def write_byte(self, address, data): """Write a single byte to the weather station. :param address: address to write to. :type address: int :param data: the value to write. :type data: int :return: success status. :rtype: bool """ buf = [ self.WriteCommandWord, address // 256, address % 256, self.EndMark, self.WriteCommandWord, data, 0, self.EndMark, ] if not self.dev.write_data(buf): return False buf = self.dev.read_data(8) if buf is None: return False for byte in buf: if byte != 0xA5: return False return True
[ "def", "write_byte", "(", "self", ",", "address", ",", "data", ")", ":", "buf", "=", "[", "self", ".", "WriteCommandWord", ",", "address", "//", "256", ",", "address", "%", "256", ",", "self", ".", "EndMark", ",", "self", ".", "WriteCommandWord", ",", ...
Write a single byte to the weather station. :param address: address to write to. :type address: int :param data: the value to write. :type data: int :return: success status. :rtype: bool
[ "Write", "a", "single", "byte", "to", "the", "weather", "station", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/weatherstation.py#L353-L387
train
36,360
jim-easterbrook/pywws
src/pywws/weatherstation.py
WeatherStation.inc_ptr
def inc_ptr(self, ptr): """Get next circular buffer data pointer.""" result = ptr + self.reading_len[self.ws_type] if result >= 0x10000: result = self.data_start return result
python
def inc_ptr(self, ptr): """Get next circular buffer data pointer.""" result = ptr + self.reading_len[self.ws_type] if result >= 0x10000: result = self.data_start return result
[ "def", "inc_ptr", "(", "self", ",", "ptr", ")", ":", "result", "=", "ptr", "+", "self", ".", "reading_len", "[", "self", ".", "ws_type", "]", "if", "result", ">=", "0x10000", ":", "result", "=", "self", ".", "data_start", "return", "result" ]
Get next circular buffer data pointer.
[ "Get", "next", "circular", "buffer", "data", "pointer", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/weatherstation.py#L658-L663
train
36,361
jim-easterbrook/pywws
src/pywws/weatherstation.py
WeatherStation.dec_ptr
def dec_ptr(self, ptr): """Get previous circular buffer data pointer.""" result = ptr - self.reading_len[self.ws_type] if result < self.data_start: result = 0x10000 - self.reading_len[self.ws_type] return result
python
def dec_ptr(self, ptr): """Get previous circular buffer data pointer.""" result = ptr - self.reading_len[self.ws_type] if result < self.data_start: result = 0x10000 - self.reading_len[self.ws_type] return result
[ "def", "dec_ptr", "(", "self", ",", "ptr", ")", ":", "result", "=", "ptr", "-", "self", ".", "reading_len", "[", "self", ".", "ws_type", "]", "if", "result", "<", "self", ".", "data_start", ":", "result", "=", "0x10000", "-", "self", ".", "reading_le...
Get previous circular buffer data pointer.
[ "Get", "previous", "circular", "buffer", "data", "pointer", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/weatherstation.py#L665-L670
train
36,362
jim-easterbrook/pywws
src/pywws/weatherstation.py
WeatherStation.get_raw_data
def get_raw_data(self, ptr, unbuffered=False): """Get raw data from circular buffer. If unbuffered is false then a cached value that was obtained earlier may be returned.""" if unbuffered: self._data_pos = None # round down ptr to a 'block boundary' idx = ptr - (ptr % 0x20) ptr -= idx count = self.reading_len[self.ws_type] if self._data_pos == idx: # cache contains useful data result = self._data_block[ptr:ptr + count] if len(result) >= count: return result else: result = list() if ptr + count > 0x20: # need part of next block, which may be in cache if self._data_pos != idx + 0x20: self._data_pos = idx + 0x20 self._data_block = self._read_block(self._data_pos) result += self._data_block[0:ptr + count - 0x20] if len(result) >= count: return result # read current block self._data_pos = idx self._data_block = self._read_block(self._data_pos) result = self._data_block[ptr:ptr + count] + result return result
python
def get_raw_data(self, ptr, unbuffered=False): """Get raw data from circular buffer. If unbuffered is false then a cached value that was obtained earlier may be returned.""" if unbuffered: self._data_pos = None # round down ptr to a 'block boundary' idx = ptr - (ptr % 0x20) ptr -= idx count = self.reading_len[self.ws_type] if self._data_pos == idx: # cache contains useful data result = self._data_block[ptr:ptr + count] if len(result) >= count: return result else: result = list() if ptr + count > 0x20: # need part of next block, which may be in cache if self._data_pos != idx + 0x20: self._data_pos = idx + 0x20 self._data_block = self._read_block(self._data_pos) result += self._data_block[0:ptr + count - 0x20] if len(result) >= count: return result # read current block self._data_pos = idx self._data_block = self._read_block(self._data_pos) result = self._data_block[ptr:ptr + count] + result return result
[ "def", "get_raw_data", "(", "self", ",", "ptr", ",", "unbuffered", "=", "False", ")", ":", "if", "unbuffered", ":", "self", ".", "_data_pos", "=", "None", "# round down ptr to a 'block boundary'", "idx", "=", "ptr", "-", "(", "ptr", "%", "0x20", ")", "ptr"...
Get raw data from circular buffer. If unbuffered is false then a cached value that was obtained earlier may be returned.
[ "Get", "raw", "data", "from", "circular", "buffer", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/weatherstation.py#L672-L702
train
36,363
jim-easterbrook/pywws
src/pywws/weatherstation.py
WeatherStation.get_data
def get_data(self, ptr, unbuffered=False): """Get decoded data from circular buffer. If unbuffered is false then a cached value that was obtained earlier may be returned.""" result = _decode(self.get_raw_data(ptr, unbuffered), self._reading_format[self.ws_type]) return result
python
def get_data(self, ptr, unbuffered=False): """Get decoded data from circular buffer. If unbuffered is false then a cached value that was obtained earlier may be returned.""" result = _decode(self.get_raw_data(ptr, unbuffered), self._reading_format[self.ws_type]) return result
[ "def", "get_data", "(", "self", ",", "ptr", ",", "unbuffered", "=", "False", ")", ":", "result", "=", "_decode", "(", "self", ".", "get_raw_data", "(", "ptr", ",", "unbuffered", ")", ",", "self", ".", "_reading_format", "[", "self", ".", "ws_type", "]"...
Get decoded data from circular buffer. If unbuffered is false then a cached value that was obtained earlier may be returned.
[ "Get", "decoded", "data", "from", "circular", "buffer", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/weatherstation.py#L704-L711
train
36,364
jim-easterbrook/pywws
src/pywws/weatherstation.py
WeatherStation.current_pos
def current_pos(self): """Get circular buffer location where current data is being written.""" new_ptr = _decode( self._read_fixed_block(0x0020), self.lo_fix_format['current_pos']) if new_ptr == self._current_ptr: return self._current_ptr if self._current_ptr and new_ptr != self.inc_ptr(self._current_ptr): logger.error( 'unexpected ptr change %06x -> %06x', self._current_ptr, new_ptr) self._current_ptr = new_ptr return self._current_ptr
python
def current_pos(self): """Get circular buffer location where current data is being written.""" new_ptr = _decode( self._read_fixed_block(0x0020), self.lo_fix_format['current_pos']) if new_ptr == self._current_ptr: return self._current_ptr if self._current_ptr and new_ptr != self.inc_ptr(self._current_ptr): logger.error( 'unexpected ptr change %06x -> %06x', self._current_ptr, new_ptr) self._current_ptr = new_ptr return self._current_ptr
[ "def", "current_pos", "(", "self", ")", ":", "new_ptr", "=", "_decode", "(", "self", ".", "_read_fixed_block", "(", "0x0020", ")", ",", "self", ".", "lo_fix_format", "[", "'current_pos'", "]", ")", "if", "new_ptr", "==", "self", ".", "_current_ptr", ":", ...
Get circular buffer location where current data is being written.
[ "Get", "circular", "buffer", "location", "where", "current", "data", "is", "being", "written", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/weatherstation.py#L713-L723
train
36,365
jim-easterbrook/pywws
src/pywws/device_pyusb.py
USBDevice._find_device
def _find_device(self, idVendor, idProduct): """Find a USB device by product and vendor id.""" for bus in usb.busses(): for device in bus.devices: if (device.idVendor == idVendor and device.idProduct == idProduct): return device return None
python
def _find_device(self, idVendor, idProduct): """Find a USB device by product and vendor id.""" for bus in usb.busses(): for device in bus.devices: if (device.idVendor == idVendor and device.idProduct == idProduct): return device return None
[ "def", "_find_device", "(", "self", ",", "idVendor", ",", "idProduct", ")", ":", "for", "bus", "in", "usb", ".", "busses", "(", ")", ":", "for", "device", "in", "bus", ".", "devices", ":", "if", "(", "device", ".", "idVendor", "==", "idVendor", "and"...
Find a USB device by product and vendor id.
[ "Find", "a", "USB", "device", "by", "product", "and", "vendor", "id", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/device_pyusb.py#L106-L113
train
36,366
jim-easterbrook/pywws
src/pywws/sqlite3data.py
_adapt_WSDateTime
def _adapt_WSDateTime(dt): """Return unix timestamp of the datetime like input. If conversion overflows high, return sint64_max , if underflows, return 0 """ try: ts = int( (dt.replace(tzinfo=pytz.utc) - datetime(1970,1,1,tzinfo=pytz.utc) ).total_seconds() ) except (OverflowError,OSError): if dt < datetime.now(): ts = 0 else: ts = 2**63-1 return ts
python
def _adapt_WSDateTime(dt): """Return unix timestamp of the datetime like input. If conversion overflows high, return sint64_max , if underflows, return 0 """ try: ts = int( (dt.replace(tzinfo=pytz.utc) - datetime(1970,1,1,tzinfo=pytz.utc) ).total_seconds() ) except (OverflowError,OSError): if dt < datetime.now(): ts = 0 else: ts = 2**63-1 return ts
[ "def", "_adapt_WSDateTime", "(", "dt", ")", ":", "try", ":", "ts", "=", "int", "(", "(", "dt", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")", "-", "datetime", "(", "1970", ",", "1", ",", "1", ",", "tzinfo", "=", "pytz", ".", "utc"...
Return unix timestamp of the datetime like input. If conversion overflows high, return sint64_max , if underflows, return 0
[ "Return", "unix", "timestamp", "of", "the", "datetime", "like", "input", ".", "If", "conversion", "overflows", "high", "return", "sint64_max", "if", "underflows", "return", "0" ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/sqlite3data.py#L93-L109
train
36,367
jim-easterbrook/pywws
src/pywws/sqlite3data.py
CoreStore._predicate
def _predicate(self, i): """Given a valid datetime or slace, return the predicate portion of the SQL query, a boolean indicating whether multiple items are expected from the result, and a dictionary of parameters for the query """ if isinstance(i, slice): if i.step is not None: raise TypeError("Slice step not permitted") if ( (i.start is not None and not isinstance(i.start, datetime)) or (i.stop is not None and not isinstance(i.stop, datetime)) ): raise TypeError( "Slice indices must be {} or None".format(datetime) ) if i.start is not None and i.stop is not None: if i.start > i.stop: raise ValueError( "Start index is greater than the End index" ) else: # Substitution of the key coloumn, but the # parameters themselves will be substituted by sqlite3 predicate = "WHERE {} BETWEEN :start AND :stop".format( self._keycol ) elif i.start is not None: # i.stop will also be None predicate = "WHERE {} >= :start".format(self._keycol) elif i.stop is not None: # i.start will also be None predicate = "WHERE {} <= :stop".format(self._keycol) else: # both are None, so equivelent to wanting everything predicate = "" multi = True pred = {"start": i.start, "stop": i.stop} elif isinstance(i, datetime): # Substitution of the key coloumn, but the # parameters themselves will be substituted by sqlite3 predicate = "WHERE {} = :key".format(self._keycol) multi = False pred = {"key": i} else: # not a slice or a datetime object raise TypeError("List indices must be {}".format(datetime)) # predicate is the end of the query string. # multi is a boolean indicating whether the result should be iterable # or not. pred is a dict of the parameters for substitution return (predicate, multi, pred)
python
def _predicate(self, i): """Given a valid datetime or slace, return the predicate portion of the SQL query, a boolean indicating whether multiple items are expected from the result, and a dictionary of parameters for the query """ if isinstance(i, slice): if i.step is not None: raise TypeError("Slice step not permitted") if ( (i.start is not None and not isinstance(i.start, datetime)) or (i.stop is not None and not isinstance(i.stop, datetime)) ): raise TypeError( "Slice indices must be {} or None".format(datetime) ) if i.start is not None and i.stop is not None: if i.start > i.stop: raise ValueError( "Start index is greater than the End index" ) else: # Substitution of the key coloumn, but the # parameters themselves will be substituted by sqlite3 predicate = "WHERE {} BETWEEN :start AND :stop".format( self._keycol ) elif i.start is not None: # i.stop will also be None predicate = "WHERE {} >= :start".format(self._keycol) elif i.stop is not None: # i.start will also be None predicate = "WHERE {} <= :stop".format(self._keycol) else: # both are None, so equivelent to wanting everything predicate = "" multi = True pred = {"start": i.start, "stop": i.stop} elif isinstance(i, datetime): # Substitution of the key coloumn, but the # parameters themselves will be substituted by sqlite3 predicate = "WHERE {} = :key".format(self._keycol) multi = False pred = {"key": i} else: # not a slice or a datetime object raise TypeError("List indices must be {}".format(datetime)) # predicate is the end of the query string. # multi is a boolean indicating whether the result should be iterable # or not. pred is a dict of the parameters for substitution return (predicate, multi, pred)
[ "def", "_predicate", "(", "self", ",", "i", ")", ":", "if", "isinstance", "(", "i", ",", "slice", ")", ":", "if", "i", ".", "step", "is", "not", "None", ":", "raise", "TypeError", "(", "\"Slice step not permitted\"", ")", "if", "(", "(", "i", ".", ...
Given a valid datetime or slace, return the predicate portion of the SQL query, a boolean indicating whether multiple items are expected from the result, and a dictionary of parameters for the query
[ "Given", "a", "valid", "datetime", "or", "slace", "return", "the", "predicate", "portion", "of", "the", "SQL", "query", "a", "boolean", "indicating", "whether", "multiple", "items", "are", "expected", "from", "the", "result", "and", "a", "dictionary", "of", ...
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/sqlite3data.py#L282-L330
train
36,368
jim-easterbrook/pywws
src/pywws/localisation.py
set_locale
def set_locale(lang): """Set the 'locale' used by a program. This affects the entire application, changing the way dates, currencies and numbers are represented. It should not be called from a library routine that may be used in another program. The ``lang`` parameter can be any string that is recognised by ``locale.setlocale()``, for example ``en``, ``en_GB`` or ``en_GB.UTF-8``. :param lang: language code. :type lang: string :return: success status. :rtype: bool """ # get the default locale lc, encoding = locale.getdefaultlocale() try: if '.' in lang: locale.setlocale(locale.LC_ALL, lang) else: locale.setlocale(locale.LC_ALL, (lang, encoding)) except locale.Error: return False return True
python
def set_locale(lang): """Set the 'locale' used by a program. This affects the entire application, changing the way dates, currencies and numbers are represented. It should not be called from a library routine that may be used in another program. The ``lang`` parameter can be any string that is recognised by ``locale.setlocale()``, for example ``en``, ``en_GB`` or ``en_GB.UTF-8``. :param lang: language code. :type lang: string :return: success status. :rtype: bool """ # get the default locale lc, encoding = locale.getdefaultlocale() try: if '.' in lang: locale.setlocale(locale.LC_ALL, lang) else: locale.setlocale(locale.LC_ALL, (lang, encoding)) except locale.Error: return False return True
[ "def", "set_locale", "(", "lang", ")", ":", "# get the default locale", "lc", ",", "encoding", "=", "locale", ".", "getdefaultlocale", "(", ")", "try", ":", "if", "'.'", "in", "lang", ":", "locale", ".", "setlocale", "(", "locale", ".", "LC_ALL", ",", "l...
Set the 'locale' used by a program. This affects the entire application, changing the way dates, currencies and numbers are represented. It should not be called from a library routine that may be used in another program. The ``lang`` parameter can be any string that is recognised by ``locale.setlocale()``, for example ``en``, ``en_GB`` or ``en_GB.UTF-8``. :param lang: language code. :type lang: string :return: success status. :rtype: bool
[ "Set", "the", "locale", "used", "by", "a", "program", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/localisation.py#L141-L166
train
36,369
jim-easterbrook/pywws
src/pywws/localisation.py
set_application_language
def set_application_language(params): """Set the locale and translation for a pywws program. This function reads the language from the configuration file, then calls :func:`set_locale` and :func:`set_translation`. :param params: a :class:`pywws.storage.params` object. :type params: object """ lang = params.get('config', 'language', None) if lang: set_locale(lang) set_translation(lang)
python
def set_application_language(params): """Set the locale and translation for a pywws program. This function reads the language from the configuration file, then calls :func:`set_locale` and :func:`set_translation`. :param params: a :class:`pywws.storage.params` object. :type params: object """ lang = params.get('config', 'language', None) if lang: set_locale(lang) set_translation(lang)
[ "def", "set_application_language", "(", "params", ")", ":", "lang", "=", "params", ".", "get", "(", "'config'", ",", "'language'", ",", "None", ")", "if", "lang", ":", "set_locale", "(", "lang", ")", "set_translation", "(", "lang", ")" ]
Set the locale and translation for a pywws program. This function reads the language from the configuration file, then calls :func:`set_locale` and :func:`set_translation`. :param params: a :class:`pywws.storage.params` object. :type params: object
[ "Set", "the", "locale", "and", "translation", "for", "a", "pywws", "program", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/localisation.py#L210-L224
train
36,370
jim-easterbrook/pywws
src/pywws/storage.py
ParamStore.get
def get(self, section, option, default=None): """Get a parameter value and return a string. If default is specified and section or option are not defined in the file, they are created and set to default, which is then the return value. """ with self._lock: if not self._config.has_option(section, option): if default is not None: self._set(section, option, default) return default return self._config.get(section, option)
python
def get(self, section, option, default=None): """Get a parameter value and return a string. If default is specified and section or option are not defined in the file, they are created and set to default, which is then the return value. """ with self._lock: if not self._config.has_option(section, option): if default is not None: self._set(section, option, default) return default return self._config.get(section, option)
[ "def", "get", "(", "self", ",", "section", ",", "option", ",", "default", "=", "None", ")", ":", "with", "self", ".", "_lock", ":", "if", "not", "self", ".", "_config", ".", "has_option", "(", "section", ",", "option", ")", ":", "if", "default", "i...
Get a parameter value and return a string. If default is specified and section or option are not defined in the file, they are created and set to default, which is then the return value.
[ "Get", "a", "parameter", "value", "and", "return", "a", "string", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/storage.py#L107-L120
train
36,371
jim-easterbrook/pywws
src/pywws/storage.py
ParamStore.set
def set(self, section, option, value): """Set option in section to string value.""" with self._lock: self._set(section, option, value)
python
def set(self, section, option, value): """Set option in section to string value.""" with self._lock: self._set(section, option, value)
[ "def", "set", "(", "self", ",", "section", ",", "option", ",", "value", ")", ":", "with", "self", ".", "_lock", ":", "self", ".", "_set", "(", "section", ",", "option", ",", "value", ")" ]
Set option in section to string value.
[ "Set", "option", "in", "section", "to", "string", "value", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/storage.py#L128-L131
train
36,372
jim-easterbrook/pywws
src/pywws/storage.py
ParamStore.unset
def unset(self, section, option): """Remove option from section.""" with self._lock: if not self._config.has_section(section): return if self._config.has_option(section, option): self._config.remove_option(section, option) self._dirty = True if not self._config.options(section): self._config.remove_section(section) self._dirty = True
python
def unset(self, section, option): """Remove option from section.""" with self._lock: if not self._config.has_section(section): return if self._config.has_option(section, option): self._config.remove_option(section, option) self._dirty = True if not self._config.options(section): self._config.remove_section(section) self._dirty = True
[ "def", "unset", "(", "self", ",", "section", ",", "option", ")", ":", "with", "self", ".", "_lock", ":", "if", "not", "self", ".", "_config", ".", "has_section", "(", "section", ")", ":", "return", "if", "self", ".", "_config", ".", "has_option", "("...
Remove option from section.
[ "Remove", "option", "from", "section", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/storage.py#L142-L152
train
36,373
jim-easterbrook/pywws
src/pywws/service/__init__.py
ServiceBase.check_params
def check_params(self, *keys): """Ensure user has set required values in weather.ini. Normally the :py:data:`~ServiceBase.config` names with ``required`` set are checked, but if your uploader has a ``register`` method you may need to check for other data. :param str keys: the :py:data:`~ServiceBase.config` names to verify. """ for key in keys: if not self.params[key]: raise RuntimeError('"{}" not set in weather.ini'.format(key))
python
def check_params(self, *keys): """Ensure user has set required values in weather.ini. Normally the :py:data:`~ServiceBase.config` names with ``required`` set are checked, but if your uploader has a ``register`` method you may need to check for other data. :param str keys: the :py:data:`~ServiceBase.config` names to verify. """ for key in keys: if not self.params[key]: raise RuntimeError('"{}" not set in weather.ini'.format(key))
[ "def", "check_params", "(", "self", ",", "*", "keys", ")", ":", "for", "key", "in", "keys", ":", "if", "not", "self", ".", "params", "[", "key", "]", ":", "raise", "RuntimeError", "(", "'\"{}\" not set in weather.ini'", ".", "format", "(", "key", ")", ...
Ensure user has set required values in weather.ini. Normally the :py:data:`~ServiceBase.config` names with ``required`` set are checked, but if your uploader has a ``register`` method you may need to check for other data. :param str keys: the :py:data:`~ServiceBase.config` names to verify.
[ "Ensure", "user", "has", "set", "required", "values", "in", "weather", ".", "ini", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/service/__init__.py#L134-L147
train
36,374
jim-easterbrook/pywws
src/pywws/datastoretransfer.py
monitor
def monitor(i): """Given an iterator, yields data from it but prints progress every 10,000 records""" count = 0 for x in i: count+=1 if count % 10000 == 0: logger.info("%d records so far, current record is %s", count, x["idx"]) yield x
python
def monitor(i): """Given an iterator, yields data from it but prints progress every 10,000 records""" count = 0 for x in i: count+=1 if count % 10000 == 0: logger.info("%d records so far, current record is %s", count, x["idx"]) yield x
[ "def", "monitor", "(", "i", ")", ":", "count", "=", "0", "for", "x", "in", "i", ":", "count", "+=", "1", "if", "count", "%", "10000", "==", "0", ":", "logger", ".", "info", "(", "\"%d records so far, current record is %s\"", ",", "count", ",", "x", "...
Given an iterator, yields data from it but prints progress every 10,000 records
[ "Given", "an", "iterator", "yields", "data", "from", "it", "but", "prints", "progress", "every", "10", "000", "records" ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/datastoretransfer.py#L37-L46
train
36,375
jim-easterbrook/pywws
src/pywws/process.py
calibrate_data
def calibrate_data(params, raw_data, calib_data): """'Calibrate' raw data, using a user-supplied function.""" start = calib_data.before(datetime.max) if start is None: start = datetime.min start = raw_data.after(start + SECOND) if start is None: return start del calib_data[start:] calibrator = Calib(params, raw_data) def calibgen(inputdata): """Internal generator function""" count = 0 for data in inputdata: idx = data['idx'] count += 1 if count % 10000 == 0: logger.info("calib: %s", idx.isoformat(' ')) elif count % 500 == 0: logger.debug("calib: %s", idx.isoformat(' ')) for key in ('rain', 'abs_pressure', 'temp_in'): if data[key] is None: logger.error('Ignoring invalid data at %s', idx.isoformat(' ')) break else: yield calibrator.calib(data) calib_data.update(calibgen(raw_data[start:])) return start
python
def calibrate_data(params, raw_data, calib_data): """'Calibrate' raw data, using a user-supplied function.""" start = calib_data.before(datetime.max) if start is None: start = datetime.min start = raw_data.after(start + SECOND) if start is None: return start del calib_data[start:] calibrator = Calib(params, raw_data) def calibgen(inputdata): """Internal generator function""" count = 0 for data in inputdata: idx = data['idx'] count += 1 if count % 10000 == 0: logger.info("calib: %s", idx.isoformat(' ')) elif count % 500 == 0: logger.debug("calib: %s", idx.isoformat(' ')) for key in ('rain', 'abs_pressure', 'temp_in'): if data[key] is None: logger.error('Ignoring invalid data at %s', idx.isoformat(' ')) break else: yield calibrator.calib(data) calib_data.update(calibgen(raw_data[start:])) return start
[ "def", "calibrate_data", "(", "params", ",", "raw_data", ",", "calib_data", ")", ":", "start", "=", "calib_data", ".", "before", "(", "datetime", ".", "max", ")", "if", "start", "is", "None", ":", "start", "=", "datetime", ".", "min", "start", "=", "ra...
Calibrate' raw data, using a user-supplied function.
[ "Calibrate", "raw", "data", "using", "a", "user", "-", "supplied", "function", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/process.py#L521-L548
train
36,376
jim-easterbrook/pywws
src/pywws/process.py
generate_hourly
def generate_hourly(calib_data, hourly_data, process_from): """Generate hourly summaries from calibrated data.""" start = hourly_data.before(datetime.max) if start is None: start = datetime.min start = calib_data.after(start + SECOND) if process_from: if start: start = min(start, process_from) else: start = process_from if start is None: return start # set start of hour in local time (not all time offsets are integer hours) start += timezone.standard_offset start = start.replace(minute=0, second=0) start -= timezone.standard_offset del hourly_data[start:] # preload pressure history, and find last valid rain prev = None pressure_history = deque() last_rain = None for data in calib_data[start - HOURx3:start]: if data['rel_pressure']: pressure_history.append((data['idx'], data['rel_pressure'])) if data['rain'] is not None: last_rain = data['rain'] prev = data # iterate over data in one hour chunks stop = calib_data.before(datetime.max) acc = HourAcc(last_rain) def hourlygen(inputdata, prev): """Internal generator function""" hour_start = start count = 0 while hour_start <= stop: count += 1 if count % 1008 == 0: logger.info("hourly: %s", hour_start.isoformat(' ')) elif count % 24 == 0: logger.debug("hourly: %s", hour_start.isoformat(' ')) hour_end = hour_start + HOUR acc.reset() for data in inputdata[hour_start:hour_end]: if data['rel_pressure']: pressure_history.append((data['idx'], data['rel_pressure'])) if prev: err = data['idx'] - prev['idx'] if abs(err - timedelta(minutes=data['delay'])) > TIME_ERR: logger.info('unexpected data interval %s %s', data['idx'].isoformat(' '), str(err)) acc.add_raw(data) prev = data new_data = acc.result() if new_data and (new_data['idx'] - hour_start) >= timedelta(minutes=9): # compute pressure trend new_data['pressure_trend'] = None if new_data['rel_pressure']: target = new_data['idx'] - HOURx3 while (len(pressure_history) >= 2 and abs(pressure_history[0][0] - target) > abs(pressure_history[1][0] - target)): 
pressure_history.popleft() if (pressure_history and abs(pressure_history[0][0] - target) < HOUR): new_data['pressure_trend'] = ( new_data['rel_pressure'] - pressure_history[0][1]) # store new hourly data yield new_data hour_start = hour_end hourly_data.update(hourlygen(calib_data, prev)) return start
python
def generate_hourly(calib_data, hourly_data, process_from): """Generate hourly summaries from calibrated data.""" start = hourly_data.before(datetime.max) if start is None: start = datetime.min start = calib_data.after(start + SECOND) if process_from: if start: start = min(start, process_from) else: start = process_from if start is None: return start # set start of hour in local time (not all time offsets are integer hours) start += timezone.standard_offset start = start.replace(minute=0, second=0) start -= timezone.standard_offset del hourly_data[start:] # preload pressure history, and find last valid rain prev = None pressure_history = deque() last_rain = None for data in calib_data[start - HOURx3:start]: if data['rel_pressure']: pressure_history.append((data['idx'], data['rel_pressure'])) if data['rain'] is not None: last_rain = data['rain'] prev = data # iterate over data in one hour chunks stop = calib_data.before(datetime.max) acc = HourAcc(last_rain) def hourlygen(inputdata, prev): """Internal generator function""" hour_start = start count = 0 while hour_start <= stop: count += 1 if count % 1008 == 0: logger.info("hourly: %s", hour_start.isoformat(' ')) elif count % 24 == 0: logger.debug("hourly: %s", hour_start.isoformat(' ')) hour_end = hour_start + HOUR acc.reset() for data in inputdata[hour_start:hour_end]: if data['rel_pressure']: pressure_history.append((data['idx'], data['rel_pressure'])) if prev: err = data['idx'] - prev['idx'] if abs(err - timedelta(minutes=data['delay'])) > TIME_ERR: logger.info('unexpected data interval %s %s', data['idx'].isoformat(' '), str(err)) acc.add_raw(data) prev = data new_data = acc.result() if new_data and (new_data['idx'] - hour_start) >= timedelta(minutes=9): # compute pressure trend new_data['pressure_trend'] = None if new_data['rel_pressure']: target = new_data['idx'] - HOURx3 while (len(pressure_history) >= 2 and abs(pressure_history[0][0] - target) > abs(pressure_history[1][0] - target)): 
pressure_history.popleft() if (pressure_history and abs(pressure_history[0][0] - target) < HOUR): new_data['pressure_trend'] = ( new_data['rel_pressure'] - pressure_history[0][1]) # store new hourly data yield new_data hour_start = hour_end hourly_data.update(hourlygen(calib_data, prev)) return start
[ "def", "generate_hourly", "(", "calib_data", ",", "hourly_data", ",", "process_from", ")", ":", "start", "=", "hourly_data", ".", "before", "(", "datetime", ".", "max", ")", "if", "start", "is", "None", ":", "start", "=", "datetime", ".", "min", "start", ...
Generate hourly summaries from calibrated data.
[ "Generate", "hourly", "summaries", "from", "calibrated", "data", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/process.py#L551-L622
train
36,377
jim-easterbrook/pywws
src/pywws/process.py
generate_daily
def generate_daily(day_end_hour, use_dst, calib_data, hourly_data, daily_data, process_from): """Generate daily summaries from calibrated and hourly data.""" start = daily_data.before(datetime.max) if start is None: start = datetime.min start = calib_data.after(start + SECOND) if process_from: if start: start = min(start, process_from) else: start = process_from if start is None: return start # round to start of this day, in local time start = timezone.local_replace( start, use_dst=use_dst, hour=day_end_hour, minute=0, second=0) del daily_data[start:] stop = calib_data.before(datetime.max) acc = DayAcc() def dailygen(inputdata): """Internal generator function""" day_start = start count = 0 while day_start <= stop: count += 1 if count % 30 == 0: logger.info("daily: %s", day_start.isoformat(' ')) else: logger.debug("daily: %s", day_start.isoformat(' ')) day_end = day_start + DAY if use_dst: # day might be 23 or 25 hours long day_end = timezone.local_replace( day_end + HOURx3, use_dst=use_dst, hour=day_end_hour) acc.reset() for data in inputdata[day_start:day_end]: acc.add_raw(data) for data in hourly_data[day_start:day_end]: acc.add_hourly(data) new_data = acc.result() if new_data: new_data['start'] = day_start yield new_data day_start = day_end daily_data.update(dailygen(calib_data)) return start
python
def generate_daily(day_end_hour, use_dst, calib_data, hourly_data, daily_data, process_from): """Generate daily summaries from calibrated and hourly data.""" start = daily_data.before(datetime.max) if start is None: start = datetime.min start = calib_data.after(start + SECOND) if process_from: if start: start = min(start, process_from) else: start = process_from if start is None: return start # round to start of this day, in local time start = timezone.local_replace( start, use_dst=use_dst, hour=day_end_hour, minute=0, second=0) del daily_data[start:] stop = calib_data.before(datetime.max) acc = DayAcc() def dailygen(inputdata): """Internal generator function""" day_start = start count = 0 while day_start <= stop: count += 1 if count % 30 == 0: logger.info("daily: %s", day_start.isoformat(' ')) else: logger.debug("daily: %s", day_start.isoformat(' ')) day_end = day_start + DAY if use_dst: # day might be 23 or 25 hours long day_end = timezone.local_replace( day_end + HOURx3, use_dst=use_dst, hour=day_end_hour) acc.reset() for data in inputdata[day_start:day_end]: acc.add_raw(data) for data in hourly_data[day_start:day_end]: acc.add_hourly(data) new_data = acc.result() if new_data: new_data['start'] = day_start yield new_data day_start = day_end daily_data.update(dailygen(calib_data)) return start
[ "def", "generate_daily", "(", "day_end_hour", ",", "use_dst", ",", "calib_data", ",", "hourly_data", ",", "daily_data", ",", "process_from", ")", ":", "start", "=", "daily_data", ".", "before", "(", "datetime", ".", "max", ")", "if", "start", "is", "None", ...
Generate daily summaries from calibrated and hourly data.
[ "Generate", "daily", "summaries", "from", "calibrated", "and", "hourly", "data", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/process.py#L625-L671
train
36,378
jim-easterbrook/pywws
src/pywws/process.py
generate_monthly
def generate_monthly(rain_day_threshold, day_end_hour, use_dst, daily_data, monthly_data, process_from): """Generate monthly summaries from daily data.""" start = monthly_data.before(datetime.max) if start is None: start = datetime.min start = daily_data.after(start + SECOND) if process_from: if start: start = min(start, process_from) else: start = process_from if start is None: return start # set start to start of first day of month (local time) start = timezone.local_replace( start, use_dst=use_dst, day=1, hour=day_end_hour, minute=0, second=0) if day_end_hour >= 12: # month actually starts on the last day of previous month start -= DAY del monthly_data[start:] stop = daily_data.before(datetime.max) if stop is None: return None acc = MonthAcc(rain_day_threshold) def monthlygen(inputdata): """Internal generator function""" month_start = start count = 0 while month_start <= stop: count += 1 if count % 12 == 0: logger.info("monthly: %s", month_start.isoformat(' ')) else: logger.debug("monthly: %s", month_start.isoformat(' ')) month_end = month_start + WEEK if month_end.month < 12: month_end = month_end.replace(month=month_end.month+1) else: month_end = month_end.replace(month=1, year=month_end.year+1) month_end = month_end - WEEK if use_dst: # month might straddle summer time start or end month_end = timezone.local_replace( month_end + HOURx3, use_dst=use_dst, hour=day_end_hour) acc.reset() for data in inputdata[month_start:month_end]: acc.add_daily(data) new_data = acc.result() if new_data: new_data['start'] = month_start yield new_data month_start = month_end monthly_data.update(monthlygen(daily_data)) return start
python
def generate_monthly(rain_day_threshold, day_end_hour, use_dst, daily_data, monthly_data, process_from): """Generate monthly summaries from daily data.""" start = monthly_data.before(datetime.max) if start is None: start = datetime.min start = daily_data.after(start + SECOND) if process_from: if start: start = min(start, process_from) else: start = process_from if start is None: return start # set start to start of first day of month (local time) start = timezone.local_replace( start, use_dst=use_dst, day=1, hour=day_end_hour, minute=0, second=0) if day_end_hour >= 12: # month actually starts on the last day of previous month start -= DAY del monthly_data[start:] stop = daily_data.before(datetime.max) if stop is None: return None acc = MonthAcc(rain_day_threshold) def monthlygen(inputdata): """Internal generator function""" month_start = start count = 0 while month_start <= stop: count += 1 if count % 12 == 0: logger.info("monthly: %s", month_start.isoformat(' ')) else: logger.debug("monthly: %s", month_start.isoformat(' ')) month_end = month_start + WEEK if month_end.month < 12: month_end = month_end.replace(month=month_end.month+1) else: month_end = month_end.replace(month=1, year=month_end.year+1) month_end = month_end - WEEK if use_dst: # month might straddle summer time start or end month_end = timezone.local_replace( month_end + HOURx3, use_dst=use_dst, hour=day_end_hour) acc.reset() for data in inputdata[month_start:month_end]: acc.add_daily(data) new_data = acc.result() if new_data: new_data['start'] = month_start yield new_data month_start = month_end monthly_data.update(monthlygen(daily_data)) return start
[ "def", "generate_monthly", "(", "rain_day_threshold", ",", "day_end_hour", ",", "use_dst", ",", "daily_data", ",", "monthly_data", ",", "process_from", ")", ":", "start", "=", "monthly_data", ".", "before", "(", "datetime", ".", "max", ")", "if", "start", "is"...
Generate monthly summaries from daily data.
[ "Generate", "monthly", "summaries", "from", "daily", "data", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/process.py#L674-L728
train
36,379
jim-easterbrook/pywws
src/pywws/process.py
process_data
def process_data(context): """Generate summaries from raw weather station data. The meteorological day end (typically 2100 or 0900 local time) is set in the preferences file ``weather.ini``. The default value is 2100 (2200 during DST), following the historical convention for weather station readings. """ logger.info('Generating summary data') # get time of last record last_raw = context.raw_data.before(datetime.max) if last_raw is None: raise IOError('No data found. Check data directory parameter.') # get daytime end hour (in local time) day_end_hour, use_dst = get_day_end_hour(context.params) # get other config rain_day_threshold = float( context.params.get('config', 'rain day threshold', '0.2')) # calibrate raw data start = calibrate_data(context.params, context.raw_data, context.calib_data) # generate hourly data start = generate_hourly(context.calib_data, context.hourly_data, start) # generate daily data start = generate_daily(day_end_hour, use_dst, context.calib_data, context.hourly_data, context.daily_data, start) # generate monthly data generate_monthly(rain_day_threshold, day_end_hour, use_dst, context.daily_data, context.monthly_data, start) return 0
python
def process_data(context): """Generate summaries from raw weather station data. The meteorological day end (typically 2100 or 0900 local time) is set in the preferences file ``weather.ini``. The default value is 2100 (2200 during DST), following the historical convention for weather station readings. """ logger.info('Generating summary data') # get time of last record last_raw = context.raw_data.before(datetime.max) if last_raw is None: raise IOError('No data found. Check data directory parameter.') # get daytime end hour (in local time) day_end_hour, use_dst = get_day_end_hour(context.params) # get other config rain_day_threshold = float( context.params.get('config', 'rain day threshold', '0.2')) # calibrate raw data start = calibrate_data(context.params, context.raw_data, context.calib_data) # generate hourly data start = generate_hourly(context.calib_data, context.hourly_data, start) # generate daily data start = generate_daily(day_end_hour, use_dst, context.calib_data, context.hourly_data, context.daily_data, start) # generate monthly data generate_monthly(rain_day_threshold, day_end_hour, use_dst, context.daily_data, context.monthly_data, start) return 0
[ "def", "process_data", "(", "context", ")", ":", "logger", ".", "info", "(", "'Generating summary data'", ")", "# get time of last record", "last_raw", "=", "context", ".", "raw_data", ".", "before", "(", "datetime", ".", "max", ")", "if", "last_raw", "is", "N...
Generate summaries from raw weather station data. The meteorological day end (typically 2100 or 0900 local time) is set in the preferences file ``weather.ini``. The default value is 2100 (2200 during DST), following the historical convention for weather station readings.
[ "Generate", "summaries", "from", "raw", "weather", "station", "data", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/process.py#L739-L768
train
36,380
jim-easterbrook/pywws
src/pywws/filedata.py
CoreStore.before
def before(self, idx): """Return datetime of newest existing data record whose datetime is < idx. Might not even be in the same year! If no such record exists, return None.""" if not isinstance(idx, datetime): raise TypeError("'%s' is not %s" % (idx, datetime)) day = min(idx.date(), self._hi_limit - DAY) while day >= self._lo_limit: if day < self._rd_cache.lo or day >= self._rd_cache.hi: self._load(self._rd_cache, day) self._rd_cache.set_ptr(idx) if self._rd_cache.ptr > 0: return self._rd_cache.data[self._rd_cache.ptr - 1]['idx'] day = self._rd_cache.lo - DAY return None
python
def before(self, idx): """Return datetime of newest existing data record whose datetime is < idx. Might not even be in the same year! If no such record exists, return None.""" if not isinstance(idx, datetime): raise TypeError("'%s' is not %s" % (idx, datetime)) day = min(idx.date(), self._hi_limit - DAY) while day >= self._lo_limit: if day < self._rd_cache.lo or day >= self._rd_cache.hi: self._load(self._rd_cache, day) self._rd_cache.set_ptr(idx) if self._rd_cache.ptr > 0: return self._rd_cache.data[self._rd_cache.ptr - 1]['idx'] day = self._rd_cache.lo - DAY return None
[ "def", "before", "(", "self", ",", "idx", ")", ":", "if", "not", "isinstance", "(", "idx", ",", "datetime", ")", ":", "raise", "TypeError", "(", "\"'%s' is not %s\"", "%", "(", "idx", ",", "datetime", ")", ")", "day", "=", "min", "(", "idx", ".", "...
Return datetime of newest existing data record whose datetime is < idx. Might not even be in the same year! If no such record exists, return None.
[ "Return", "datetime", "of", "newest", "existing", "data", "record", "whose", "datetime", "is", "<", "idx", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/filedata.py#L282-L298
train
36,381
jim-easterbrook/pywws
src/pywws/filedata.py
CoreStore.after
def after(self, idx): """Return datetime of oldest existing data record whose datetime is >= idx. Might not even be in the same year! If no such record exists, return None.""" if not isinstance(idx, datetime): raise TypeError("'%s' is not %s" % (idx, datetime)) day = max(idx.date(), self._lo_limit) while day < self._hi_limit: if day < self._rd_cache.lo or day >= self._rd_cache.hi: self._load(self._rd_cache, day) self._rd_cache.set_ptr(idx) if self._rd_cache.ptr < len(self._rd_cache.data): return self._rd_cache.data[self._rd_cache.ptr]['idx'] day = self._rd_cache.hi return None
python
def after(self, idx): """Return datetime of oldest existing data record whose datetime is >= idx. Might not even be in the same year! If no such record exists, return None.""" if not isinstance(idx, datetime): raise TypeError("'%s' is not %s" % (idx, datetime)) day = max(idx.date(), self._lo_limit) while day < self._hi_limit: if day < self._rd_cache.lo or day >= self._rd_cache.hi: self._load(self._rd_cache, day) self._rd_cache.set_ptr(idx) if self._rd_cache.ptr < len(self._rd_cache.data): return self._rd_cache.data[self._rd_cache.ptr]['idx'] day = self._rd_cache.hi return None
[ "def", "after", "(", "self", ",", "idx", ")", ":", "if", "not", "isinstance", "(", "idx", ",", "datetime", ")", ":", "raise", "TypeError", "(", "\"'%s' is not %s\"", "%", "(", "idx", ",", "datetime", ")", ")", "day", "=", "max", "(", "idx", ".", "d...
Return datetime of oldest existing data record whose datetime is >= idx. Might not even be in the same year! If no such record exists, return None.
[ "Return", "datetime", "of", "oldest", "existing", "data", "record", "whose", "datetime", "is", ">", "=", "idx", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/filedata.py#L300-L316
train
36,382
jim-easterbrook/pywws
src/pywws/filedata.py
CoreStore.nearest
def nearest(self, idx): """Return datetime of record whose datetime is nearest idx.""" hi = self.after(idx) lo = self.before(idx) if hi is None: return lo if lo is None: return hi if abs(hi - idx) < abs(lo - idx): return hi return lo
python
def nearest(self, idx): """Return datetime of record whose datetime is nearest idx.""" hi = self.after(idx) lo = self.before(idx) if hi is None: return lo if lo is None: return hi if abs(hi - idx) < abs(lo - idx): return hi return lo
[ "def", "nearest", "(", "self", ",", "idx", ")", ":", "hi", "=", "self", ".", "after", "(", "idx", ")", "lo", "=", "self", ".", "before", "(", "idx", ")", "if", "hi", "is", "None", ":", "return", "lo", "if", "lo", "is", "None", ":", "return", ...
Return datetime of record whose datetime is nearest idx.
[ "Return", "datetime", "of", "record", "whose", "datetime", "is", "nearest", "idx", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/filedata.py#L318-L328
train
36,383
jim-easterbrook/pywws
src/pywws/filedata.py
CoreStore.clear
def clear(self): """Clears all data from the data store permanently""" for root, dirs, files in os.walk(self._root_dir, topdown=False): for file in files: os.unlink(os.path.join(root, file)) os.rmdir(root) # Get the root dir back and re-initialise to start again root_dir = os.path.abspath( os.path.join(self._root_dir, os.pardir)) self.__init__(root_dir)
python
def clear(self): """Clears all data from the data store permanently""" for root, dirs, files in os.walk(self._root_dir, topdown=False): for file in files: os.unlink(os.path.join(root, file)) os.rmdir(root) # Get the root dir back and re-initialise to start again root_dir = os.path.abspath( os.path.join(self._root_dir, os.pardir)) self.__init__(root_dir)
[ "def", "clear", "(", "self", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "self", ".", "_root_dir", ",", "topdown", "=", "False", ")", ":", "for", "file", "in", "files", ":", "os", ".", "unlink", "(", "os", ...
Clears all data from the data store permanently
[ "Clears", "all", "data", "from", "the", "data", "store", "permanently" ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/filedata.py#L427-L436
train
36,384
jim-easterbrook/pywws
src/pywws/conversions.py
pressure_trend_text
def pressure_trend_text(trend): """Convert pressure trend to a string, as used by the UK met office. """ _ = pywws.localisation.translation.ugettext if trend > 6.0: return _(u'rising very rapidly') elif trend > 3.5: return _(u'rising quickly') elif trend > 1.5: return _(u'rising') elif trend >= 0.1: return _(u'rising slowly') elif trend < -6.0: return _(u'falling very rapidly') elif trend < -3.5: return _(u'falling quickly') elif trend < -1.5: return _(u'falling') elif trend <= -0.1: return _(u'falling slowly') return _(u'steady')
python
def pressure_trend_text(trend): """Convert pressure trend to a string, as used by the UK met office. """ _ = pywws.localisation.translation.ugettext if trend > 6.0: return _(u'rising very rapidly') elif trend > 3.5: return _(u'rising quickly') elif trend > 1.5: return _(u'rising') elif trend >= 0.1: return _(u'rising slowly') elif trend < -6.0: return _(u'falling very rapidly') elif trend < -3.5: return _(u'falling quickly') elif trend < -1.5: return _(u'falling') elif trend <= -0.1: return _(u'falling slowly') return _(u'steady')
[ "def", "pressure_trend_text", "(", "trend", ")", ":", "_", "=", "pywws", ".", "localisation", ".", "translation", ".", "ugettext", "if", "trend", ">", "6.0", ":", "return", "_", "(", "u'rising very rapidly'", ")", "elif", "trend", ">", "3.5", ":", "return"...
Convert pressure trend to a string, as used by the UK met office.
[ "Convert", "pressure", "trend", "to", "a", "string", "as", "used", "by", "the", "UK", "met", "office", "." ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/conversions.py#L50-L72
train
36,385
jim-easterbrook/pywws
src/pywws/conversions.py
winddir_text
def winddir_text(pts): "Convert wind direction from 0..15 to compass point text" global _winddir_text_array if pts is None: return None if not isinstance(pts, int): pts = int(pts + 0.5) % 16 if not _winddir_text_array: _ = pywws.localisation.translation.ugettext _winddir_text_array = ( _(u'N'), _(u'NNE'), _(u'NE'), _(u'ENE'), _(u'E'), _(u'ESE'), _(u'SE'), _(u'SSE'), _(u'S'), _(u'SSW'), _(u'SW'), _(u'WSW'), _(u'W'), _(u'WNW'), _(u'NW'), _(u'NNW'), ) return _winddir_text_array[pts]
python
def winddir_text(pts): "Convert wind direction from 0..15 to compass point text" global _winddir_text_array if pts is None: return None if not isinstance(pts, int): pts = int(pts + 0.5) % 16 if not _winddir_text_array: _ = pywws.localisation.translation.ugettext _winddir_text_array = ( _(u'N'), _(u'NNE'), _(u'NE'), _(u'ENE'), _(u'E'), _(u'ESE'), _(u'SE'), _(u'SSE'), _(u'S'), _(u'SSW'), _(u'SW'), _(u'WSW'), _(u'W'), _(u'WNW'), _(u'NW'), _(u'NNW'), ) return _winddir_text_array[pts]
[ "def", "winddir_text", "(", "pts", ")", ":", "global", "_winddir_text_array", "if", "pts", "is", "None", ":", "return", "None", "if", "not", "isinstance", "(", "pts", ",", "int", ")", ":", "pts", "=", "int", "(", "pts", "+", "0.5", ")", "%", "16", ...
Convert wind direction from 0..15 to compass point text
[ "Convert", "wind", "direction", "from", "0", "..", "15", "to", "compass", "point", "text" ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/conversions.py#L149-L164
train
36,386
jim-easterbrook/pywws
src/pywws/conversions.py
wind_bft
def wind_bft(ms): "Convert wind from metres per second to Beaufort scale" if ms is None: return None for bft in range(len(_bft_threshold)): if ms < _bft_threshold[bft]: return bft return len(_bft_threshold)
python
def wind_bft(ms): "Convert wind from metres per second to Beaufort scale" if ms is None: return None for bft in range(len(_bft_threshold)): if ms < _bft_threshold[bft]: return bft return len(_bft_threshold)
[ "def", "wind_bft", "(", "ms", ")", ":", "if", "ms", "is", "None", ":", "return", "None", "for", "bft", "in", "range", "(", "len", "(", "_bft_threshold", ")", ")", ":", "if", "ms", "<", "_bft_threshold", "[", "bft", "]", ":", "return", "bft", "retur...
Convert wind from metres per second to Beaufort scale
[ "Convert", "wind", "from", "metres", "per", "second", "to", "Beaufort", "scale" ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/conversions.py#L181-L188
train
36,387
jim-easterbrook/pywws
src/pywws/conversions.py
cadhumidex
def cadhumidex(temp, humidity): "Calculate Humidity Index as per Canadian Weather Standards" if temp is None or humidity is None: return None # Formulas are adapted to not use e^(...) with no appreciable # change in accuracy (0.0227%) saturation_pressure = (6.112 * (10.0**(7.5 * temp / (237.7 + temp))) * float(humidity) / 100.0) return temp + (0.555 * (saturation_pressure - 10.0))
python
def cadhumidex(temp, humidity): "Calculate Humidity Index as per Canadian Weather Standards" if temp is None or humidity is None: return None # Formulas are adapted to not use e^(...) with no appreciable # change in accuracy (0.0227%) saturation_pressure = (6.112 * (10.0**(7.5 * temp / (237.7 + temp))) * float(humidity) / 100.0) return temp + (0.555 * (saturation_pressure - 10.0))
[ "def", "cadhumidex", "(", "temp", ",", "humidity", ")", ":", "if", "temp", "is", "None", "or", "humidity", "is", "None", ":", "return", "None", "# Formulas are adapted to not use e^(...) with no appreciable", "# change in accuracy (0.0227%)", "saturation_pressure", "=", ...
Calculate Humidity Index as per Canadian Weather Standards
[ "Calculate", "Humidity", "Index", "as", "per", "Canadian", "Weather", "Standards" ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/conversions.py#L202-L210
train
36,388
jim-easterbrook/pywws
src/pywws/conversions.py
usaheatindex
def usaheatindex(temp, humidity, dew=None): """Calculate Heat Index as per USA National Weather Service Standards See http://en.wikipedia.org/wiki/Heat_index, formula 1. The formula is not valid for T < 26.7C, Dew Point < 12C, or RH < 40% """ if temp is None or humidity is None: return None if dew is None: dew = dew_point(temp, humidity) if temp < 26.7 or humidity < 40 or dew < 12.0: return temp T = (temp * 1.8) + 32.0 R = humidity c_1 = -42.379 c_2 = 2.04901523 c_3 = 10.14333127 c_4 = -0.22475541 c_5 = -0.00683783 c_6 = -0.05481717 c_7 = 0.00122874 c_8 = 0.00085282 c_9 = -0.00000199 return ((c_1 + (c_2 * T) + (c_3 * R) + (c_4 * T * R) + (c_5 * (T**2)) + (c_6 * (R**2)) + (c_7 * (T**2) * R) + (c_8 * T * (R**2)) + (c_9 * (T**2) * (R**2))) - 32.0) / 1.8
python
def usaheatindex(temp, humidity, dew=None): """Calculate Heat Index as per USA National Weather Service Standards See http://en.wikipedia.org/wiki/Heat_index, formula 1. The formula is not valid for T < 26.7C, Dew Point < 12C, or RH < 40% """ if temp is None or humidity is None: return None if dew is None: dew = dew_point(temp, humidity) if temp < 26.7 or humidity < 40 or dew < 12.0: return temp T = (temp * 1.8) + 32.0 R = humidity c_1 = -42.379 c_2 = 2.04901523 c_3 = 10.14333127 c_4 = -0.22475541 c_5 = -0.00683783 c_6 = -0.05481717 c_7 = 0.00122874 c_8 = 0.00085282 c_9 = -0.00000199 return ((c_1 + (c_2 * T) + (c_3 * R) + (c_4 * T * R) + (c_5 * (T**2)) + (c_6 * (R**2)) + (c_7 * (T**2) * R) + (c_8 * T * (R**2)) + (c_9 * (T**2) * (R**2))) - 32.0) / 1.8
[ "def", "usaheatindex", "(", "temp", ",", "humidity", ",", "dew", "=", "None", ")", ":", "if", "temp", "is", "None", "or", "humidity", "is", "None", ":", "return", "None", "if", "dew", "is", "None", ":", "dew", "=", "dew_point", "(", "temp", ",", "h...
Calculate Heat Index as per USA National Weather Service Standards See http://en.wikipedia.org/wiki/Heat_index, formula 1. The formula is not valid for T < 26.7C, Dew Point < 12C, or RH < 40%
[ "Calculate", "Heat", "Index", "as", "per", "USA", "National", "Weather", "Service", "Standards" ]
4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/conversions.py#L212-L238
train
36,389
ppaquette/gym-pull
gym_pull/package/manager.py
PackageManager.load_user_envs
def load_user_envs(self): """ Loads downloaded user envs from filesystem cache on `import gym` """ installed_packages = self._list_packages() # Tagging core envs gym_package = 'gym ({})'.format(installed_packages['gym']) if 'gym' in installed_packages else 'gym' core_specs = registry.all() for spec in core_specs: spec.source = 'OpenAI Gym Core Package' spec.package = gym_package # Loading user envs if not os.path.isfile(self.cache_path): return with open(self.cache_path) as cache: for line in cache: user_package, registered_envs = self._load_package(line.rstrip('\n'), installed_packages) if logger.level <= logging.DEBUG: logger.debug('Installed %d user environments from package "%s"', len(registered_envs), user_package['name']) if self.cache_needs_update: self._update_cache() if len(self.env_ids) > 0: logger.info('Found and registered %d user environments.', len(self.env_ids))
python
def load_user_envs(self): """ Loads downloaded user envs from filesystem cache on `import gym` """ installed_packages = self._list_packages() # Tagging core envs gym_package = 'gym ({})'.format(installed_packages['gym']) if 'gym' in installed_packages else 'gym' core_specs = registry.all() for spec in core_specs: spec.source = 'OpenAI Gym Core Package' spec.package = gym_package # Loading user envs if not os.path.isfile(self.cache_path): return with open(self.cache_path) as cache: for line in cache: user_package, registered_envs = self._load_package(line.rstrip('\n'), installed_packages) if logger.level <= logging.DEBUG: logger.debug('Installed %d user environments from package "%s"', len(registered_envs), user_package['name']) if self.cache_needs_update: self._update_cache() if len(self.env_ids) > 0: logger.info('Found and registered %d user environments.', len(self.env_ids))
[ "def", "load_user_envs", "(", "self", ")", ":", "installed_packages", "=", "self", ".", "_list_packages", "(", ")", "# Tagging core envs", "gym_package", "=", "'gym ({})'", ".", "format", "(", "installed_packages", "[", "'gym'", "]", ")", "if", "'gym'", "in", ...
Loads downloaded user envs from filesystem cache on `import gym`
[ "Loads", "downloaded", "user", "envs", "from", "filesystem", "cache", "on", "import", "gym" ]
5b2797fd081ba5be26544983d1eba764e6d9f73b
https://github.com/ppaquette/gym-pull/blob/5b2797fd081ba5be26544983d1eba764e6d9f73b/gym_pull/package/manager.py#L31-L53
train
36,390
ppaquette/gym-pull
gym_pull/envs/registration.py
EnvSpec.make
def make(self): """Instantiates an instance of the environment with appropriate kwargs""" if self._entry_point is None: raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id)) cls = load(self._entry_point) env = cls(**self._kwargs) # Make the enviroment aware of which spec it came from. env.spec = self env = env.build(extra_wrappers=self._wrappers) return env
python
def make(self): """Instantiates an instance of the environment with appropriate kwargs""" if self._entry_point is None: raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id)) cls = load(self._entry_point) env = cls(**self._kwargs) # Make the enviroment aware of which spec it came from. env.spec = self env = env.build(extra_wrappers=self._wrappers) return env
[ "def", "make", "(", "self", ")", ":", "if", "self", ".", "_entry_point", "is", "None", ":", "raise", "error", ".", "Error", "(", "'Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'", ".", "format", "(", "self", ".", "id", ...
Instantiates an instance of the environment with appropriate kwargs
[ "Instantiates", "an", "instance", "of", "the", "environment", "with", "appropriate", "kwargs" ]
5b2797fd081ba5be26544983d1eba764e6d9f73b
https://github.com/ppaquette/gym-pull/blob/5b2797fd081ba5be26544983d1eba764e6d9f73b/gym_pull/envs/registration.py#L63-L75
train
36,391
jingw/pyhdfs
pyhdfs.py
HdfsClient._record_last_active
def _record_last_active(self, host): """Put host first in our host list, so we try it first next time The implementation of get_active_namenode relies on this reordering. """ if host in self.hosts: # this check is for when user passes a host at request time # Keep this thread safe: set hosts atomically and update it before the timestamp self.hosts = [host] + [h for h in self.hosts if h != host] self._last_time_recorded_active = time.time()
python
def _record_last_active(self, host): """Put host first in our host list, so we try it first next time The implementation of get_active_namenode relies on this reordering. """ if host in self.hosts: # this check is for when user passes a host at request time # Keep this thread safe: set hosts atomically and update it before the timestamp self.hosts = [host] + [h for h in self.hosts if h != host] self._last_time_recorded_active = time.time()
[ "def", "_record_last_active", "(", "self", ",", "host", ")", ":", "if", "host", "in", "self", ".", "hosts", ":", "# this check is for when user passes a host at request time", "# Keep this thread safe: set hosts atomically and update it before the timestamp", "self", ".", "host...
Put host first in our host list, so we try it first next time The implementation of get_active_namenode relies on this reordering.
[ "Put", "host", "first", "in", "our", "host", "list", "so", "we", "try", "it", "first", "next", "time" ]
b382b34f7cb28b41559f5be73102beb1732cd933
https://github.com/jingw/pyhdfs/blob/b382b34f7cb28b41559f5be73102beb1732cd933/pyhdfs.py#L342-L350
train
36,392
jingw/pyhdfs
pyhdfs.py
HdfsClient._request
def _request(self, method, path, op, expected_status=httplib.OK, **kwargs): """Make a WebHDFS request against the NameNodes This function handles NameNode failover and error checking. All kwargs are passed as query params to the WebHDFS server. """ hosts, path = self._parse_path(path) _transform_user_name_key(kwargs) kwargs.setdefault('user.name', self.user_name) formatted_args = ' '.join('{}={}'.format(*t) for t in kwargs.items()) _logger.info("%s %s %s %s", op, path, formatted_args, ','.join(hosts)) kwargs['op'] = op for i in range(self.max_tries): log_level = logging.DEBUG if i < self.max_tries - 1 else logging.WARNING for host in hosts: try: response = self._requests_session.request( method, 'http://{}{}{}'.format(host, WEBHDFS_PATH, url_quote(path.encode('utf-8'))), params=kwargs, timeout=self.timeout, allow_redirects=False, **self._requests_kwargs ) except (requests.exceptions.ConnectionError, requests.exceptions.Timeout): _logger.log(log_level, "Failed to reach to %s (attempt %d/%d)", host, i + 1, self.max_tries, exc_info=True) continue try: _check_response(response, expected_status) except (HdfsRetriableException, HdfsStandbyException): _logger.log(log_level, "%s is in startup or standby mode (attempt %d/%d)", host, i + 1, self.max_tries, exc_info=True) continue # Note: standby NN can still return basic validation errors, so non-StandbyException # does not necessarily mean we have the active NN. self._record_last_active(host) return response if i != self.max_tries - 1: time.sleep(self.retry_delay) raise HdfsNoServerException("Could not use any of the given hosts")
python
def _request(self, method, path, op, expected_status=httplib.OK, **kwargs): """Make a WebHDFS request against the NameNodes This function handles NameNode failover and error checking. All kwargs are passed as query params to the WebHDFS server. """ hosts, path = self._parse_path(path) _transform_user_name_key(kwargs) kwargs.setdefault('user.name', self.user_name) formatted_args = ' '.join('{}={}'.format(*t) for t in kwargs.items()) _logger.info("%s %s %s %s", op, path, formatted_args, ','.join(hosts)) kwargs['op'] = op for i in range(self.max_tries): log_level = logging.DEBUG if i < self.max_tries - 1 else logging.WARNING for host in hosts: try: response = self._requests_session.request( method, 'http://{}{}{}'.format(host, WEBHDFS_PATH, url_quote(path.encode('utf-8'))), params=kwargs, timeout=self.timeout, allow_redirects=False, **self._requests_kwargs ) except (requests.exceptions.ConnectionError, requests.exceptions.Timeout): _logger.log(log_level, "Failed to reach to %s (attempt %d/%d)", host, i + 1, self.max_tries, exc_info=True) continue try: _check_response(response, expected_status) except (HdfsRetriableException, HdfsStandbyException): _logger.log(log_level, "%s is in startup or standby mode (attempt %d/%d)", host, i + 1, self.max_tries, exc_info=True) continue # Note: standby NN can still return basic validation errors, so non-StandbyException # does not necessarily mean we have the active NN. self._record_last_active(host) return response if i != self.max_tries - 1: time.sleep(self.retry_delay) raise HdfsNoServerException("Could not use any of the given hosts")
[ "def", "_request", "(", "self", ",", "method", ",", "path", ",", "op", ",", "expected_status", "=", "httplib", ".", "OK", ",", "*", "*", "kwargs", ")", ":", "hosts", ",", "path", "=", "self", ".", "_parse_path", "(", "path", ")", "_transform_user_name_...
Make a WebHDFS request against the NameNodes This function handles NameNode failover and error checking. All kwargs are passed as query params to the WebHDFS server.
[ "Make", "a", "WebHDFS", "request", "against", "the", "NameNodes" ]
b382b34f7cb28b41559f5be73102beb1732cd933
https://github.com/jingw/pyhdfs/blob/b382b34f7cb28b41559f5be73102beb1732cd933/pyhdfs.py#L352-L391
train
36,393
jingw/pyhdfs
pyhdfs.py
HdfsClient.create
def create(self, path, data, **kwargs): """Create a file at the given path. :param data: ``bytes`` or a ``file``-like object to upload :param overwrite: If a file already exists, should it be overwritten? :type overwrite: bool :param blocksize: The block size of a file. :type blocksize: long :param replication: The number of replications of a file. :type replication: short :param permission: The permission of a file/directory. Any radix-8 integer (leading zeros may be omitted.) :type permission: octal :param buffersize: The size of the buffer used in transferring data. :type buffersize: int """ metadata_response = self._put( path, 'CREATE', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs) assert not metadata_response.content data_response = self._requests_session.put( metadata_response.headers['location'], data=data, **self._requests_kwargs) _check_response(data_response, expected_status=httplib.CREATED) assert not data_response.content
python
def create(self, path, data, **kwargs): """Create a file at the given path. :param data: ``bytes`` or a ``file``-like object to upload :param overwrite: If a file already exists, should it be overwritten? :type overwrite: bool :param blocksize: The block size of a file. :type blocksize: long :param replication: The number of replications of a file. :type replication: short :param permission: The permission of a file/directory. Any radix-8 integer (leading zeros may be omitted.) :type permission: octal :param buffersize: The size of the buffer used in transferring data. :type buffersize: int """ metadata_response = self._put( path, 'CREATE', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs) assert not metadata_response.content data_response = self._requests_session.put( metadata_response.headers['location'], data=data, **self._requests_kwargs) _check_response(data_response, expected_status=httplib.CREATED) assert not data_response.content
[ "def", "create", "(", "self", ",", "path", ",", "data", ",", "*", "*", "kwargs", ")", ":", "metadata_response", "=", "self", ".", "_put", "(", "path", ",", "'CREATE'", ",", "expected_status", "=", "httplib", ".", "TEMPORARY_REDIRECT", ",", "*", "*", "k...
Create a file at the given path. :param data: ``bytes`` or a ``file``-like object to upload :param overwrite: If a file already exists, should it be overwritten? :type overwrite: bool :param blocksize: The block size of a file. :type blocksize: long :param replication: The number of replications of a file. :type replication: short :param permission: The permission of a file/directory. Any radix-8 integer (leading zeros may be omitted.) :type permission: octal :param buffersize: The size of the buffer used in transferring data. :type buffersize: int
[ "Create", "a", "file", "at", "the", "given", "path", "." ]
b382b34f7cb28b41559f5be73102beb1732cd933
https://github.com/jingw/pyhdfs/blob/b382b34f7cb28b41559f5be73102beb1732cd933/pyhdfs.py#L409-L431
train
36,394
jingw/pyhdfs
pyhdfs.py
HdfsClient.append
def append(self, path, data, **kwargs): """Append to the given file. :param data: ``bytes`` or a ``file``-like object :param buffersize: The size of the buffer used in transferring data. :type buffersize: int """ metadata_response = self._post( path, 'APPEND', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs) data_response = self._requests_session.post( metadata_response.headers['location'], data=data, **self._requests_kwargs) _check_response(data_response) assert not data_response.content
python
def append(self, path, data, **kwargs): """Append to the given file. :param data: ``bytes`` or a ``file``-like object :param buffersize: The size of the buffer used in transferring data. :type buffersize: int """ metadata_response = self._post( path, 'APPEND', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs) data_response = self._requests_session.post( metadata_response.headers['location'], data=data, **self._requests_kwargs) _check_response(data_response) assert not data_response.content
[ "def", "append", "(", "self", ",", "path", ",", "data", ",", "*", "*", "kwargs", ")", ":", "metadata_response", "=", "self", ".", "_post", "(", "path", ",", "'APPEND'", ",", "expected_status", "=", "httplib", ".", "TEMPORARY_REDIRECT", ",", "*", "*", "...
Append to the given file. :param data: ``bytes`` or a ``file``-like object :param buffersize: The size of the buffer used in transferring data. :type buffersize: int
[ "Append", "to", "the", "given", "file", "." ]
b382b34f7cb28b41559f5be73102beb1732cd933
https://github.com/jingw/pyhdfs/blob/b382b34f7cb28b41559f5be73102beb1732cd933/pyhdfs.py#L433-L445
train
36,395
jingw/pyhdfs
pyhdfs.py
HdfsClient.concat
def concat(self, target, sources, **kwargs): """Concat existing files together. For preconditions, see https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/filesystem/filesystem.html#void_concatPath_p_Path_sources :param target: the path to the target destination. :param sources: the paths to the sources to use for the concatenation. :type sources: list """ if isinstance(sources, basestring): raise ValueError("sources should be a list") if any(',' in s for s in sources): raise NotImplementedError("WebHDFS does not support commas in concat") response = self._post(target, 'CONCAT', sources=','.join(sources), **kwargs) assert not response.content
python
def concat(self, target, sources, **kwargs): """Concat existing files together. For preconditions, see https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/filesystem/filesystem.html#void_concatPath_p_Path_sources :param target: the path to the target destination. :param sources: the paths to the sources to use for the concatenation. :type sources: list """ if isinstance(sources, basestring): raise ValueError("sources should be a list") if any(',' in s for s in sources): raise NotImplementedError("WebHDFS does not support commas in concat") response = self._post(target, 'CONCAT', sources=','.join(sources), **kwargs) assert not response.content
[ "def", "concat", "(", "self", ",", "target", ",", "sources", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "sources", ",", "basestring", ")", ":", "raise", "ValueError", "(", "\"sources should be a list\"", ")", "if", "any", "(", "','", "in...
Concat existing files together. For preconditions, see https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/filesystem/filesystem.html#void_concatPath_p_Path_sources :param target: the path to the target destination. :param sources: the paths to the sources to use for the concatenation. :type sources: list
[ "Concat", "existing", "files", "together", "." ]
b382b34f7cb28b41559f5be73102beb1732cd933
https://github.com/jingw/pyhdfs/blob/b382b34f7cb28b41559f5be73102beb1732cd933/pyhdfs.py#L447-L462
train
36,396
jingw/pyhdfs
pyhdfs.py
HdfsClient.open
def open(self, path, **kwargs): """Return a file-like object for reading the given HDFS path. :param offset: The starting byte position. :type offset: long :param length: The number of bytes to be processed. :type length: long :param buffersize: The size of the buffer used in transferring data. :type buffersize: int :rtype: file-like object """ metadata_response = self._get( path, 'OPEN', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs) data_response = self._requests_session.get( metadata_response.headers['location'], stream=True, **self._requests_kwargs) _check_response(data_response) return data_response.raw
python
def open(self, path, **kwargs): """Return a file-like object for reading the given HDFS path. :param offset: The starting byte position. :type offset: long :param length: The number of bytes to be processed. :type length: long :param buffersize: The size of the buffer used in transferring data. :type buffersize: int :rtype: file-like object """ metadata_response = self._get( path, 'OPEN', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs) data_response = self._requests_session.get( metadata_response.headers['location'], stream=True, **self._requests_kwargs) _check_response(data_response) return data_response.raw
[ "def", "open", "(", "self", ",", "path", ",", "*", "*", "kwargs", ")", ":", "metadata_response", "=", "self", ".", "_get", "(", "path", ",", "'OPEN'", ",", "expected_status", "=", "httplib", ".", "TEMPORARY_REDIRECT", ",", "*", "*", "kwargs", ")", "dat...
Return a file-like object for reading the given HDFS path. :param offset: The starting byte position. :type offset: long :param length: The number of bytes to be processed. :type length: long :param buffersize: The size of the buffer used in transferring data. :type buffersize: int :rtype: file-like object
[ "Return", "a", "file", "-", "like", "object", "for", "reading", "the", "given", "HDFS", "path", "." ]
b382b34f7cb28b41559f5be73102beb1732cd933
https://github.com/jingw/pyhdfs/blob/b382b34f7cb28b41559f5be73102beb1732cd933/pyhdfs.py#L464-L480
train
36,397
jingw/pyhdfs
pyhdfs.py
HdfsClient.create_symlink
def create_symlink(self, link, destination, **kwargs): """Create a symbolic link at ``link`` pointing to ``destination``. :param link: the path to be created that points to target :param destination: the target of the symbolic link :param createParent: If the parent directories do not exist, should they be created? :type createParent: bool :raises HdfsUnsupportedOperationException: This feature doesn't actually work, at least on CDH 5.3.0. """ response = self._put(link, 'CREATESYMLINK', destination=destination, **kwargs) assert not response.content
python
def create_symlink(self, link, destination, **kwargs): """Create a symbolic link at ``link`` pointing to ``destination``. :param link: the path to be created that points to target :param destination: the target of the symbolic link :param createParent: If the parent directories do not exist, should they be created? :type createParent: bool :raises HdfsUnsupportedOperationException: This feature doesn't actually work, at least on CDH 5.3.0. """ response = self._put(link, 'CREATESYMLINK', destination=destination, **kwargs) assert not response.content
[ "def", "create_symlink", "(", "self", ",", "link", ",", "destination", ",", "*", "*", "kwargs", ")", ":", "response", "=", "self", ".", "_put", "(", "link", ",", "'CREATESYMLINK'", ",", "destination", "=", "destination", ",", "*", "*", "kwargs", ")", "...
Create a symbolic link at ``link`` pointing to ``destination``. :param link: the path to be created that points to target :param destination: the target of the symbolic link :param createParent: If the parent directories do not exist, should they be created? :type createParent: bool :raises HdfsUnsupportedOperationException: This feature doesn't actually work, at least on CDH 5.3.0.
[ "Create", "a", "symbolic", "link", "at", "link", "pointing", "to", "destination", "." ]
b382b34f7cb28b41559f5be73102beb1732cd933
https://github.com/jingw/pyhdfs/blob/b382b34f7cb28b41559f5be73102beb1732cd933/pyhdfs.py#L496-L507
train
36,398
jingw/pyhdfs
pyhdfs.py
HdfsClient.rename
def rename(self, path, destination, **kwargs): """Renames Path src to Path dst. :returns: true if rename is successful :rtype: bool """ return _json(self._put(path, 'RENAME', destination=destination, **kwargs))['boolean']
python
def rename(self, path, destination, **kwargs): """Renames Path src to Path dst. :returns: true if rename is successful :rtype: bool """ return _json(self._put(path, 'RENAME', destination=destination, **kwargs))['boolean']
[ "def", "rename", "(", "self", ",", "path", ",", "destination", ",", "*", "*", "kwargs", ")", ":", "return", "_json", "(", "self", ".", "_put", "(", "path", ",", "'RENAME'", ",", "destination", "=", "destination", ",", "*", "*", "kwargs", ")", ")", ...
Renames Path src to Path dst. :returns: true if rename is successful :rtype: bool
[ "Renames", "Path", "src", "to", "Path", "dst", "." ]
b382b34f7cb28b41559f5be73102beb1732cd933
https://github.com/jingw/pyhdfs/blob/b382b34f7cb28b41559f5be73102beb1732cd933/pyhdfs.py#L509-L515
train
36,399