repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
thespacedoctor/transientNamer
|
transientNamer/search.py
|
search.files
|
python
|
def files(
        self):
    """*The source files associated with the matched transients*

    Returns an independent list of dictionary copies so callers can
    mutate the result without touching the cached result set.
    """
    return [dict(record) for record in self.relatedFilesResultsList]
|
*The associated source files*
**Usage:**
.. code-block:: python
sourceFiles = tns.files
|
train
|
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L182-L195
| null |
class search():
"""
*The worker class for the transient namer search module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``ra`` -- RA of the location being checked
- ``dec`` -- DEC of the location being searched
- ``radiusArcsec`` -- the radius of the conesearch to perform against the TNS
- ``name`` -- name of the object to search the TNS for
- ``discInLastDays`` -- search the TNS for transients discovered in the last X days
- ``comments`` -- print the comments from the TNS, note these can be long making table outputs somewhat unreadable. Default *False*
**Usage:**
To initiate a search object to search the TNS via an object name (either TNS or survey names accepted):
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
name="Gaia16bbi"
)
or for a conesearch use something similar to:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
ra="06:50:36.74",
dec="+31:06:44.7",
radiusArcsec=5
)
Note the search method can accept coordinates in sexagesimal or decimal degree formats.
To list all new objects discovered in the last three weeks, then use:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
discInLastDays=21
)
"""
# Initialisation
def __init__(
        self,
        log,
        ra="",
        dec="",
        radiusArcsec="",
        name="",
        discInLastDays="",
        settings=False,
        comments=False
):
    """Initialise the search object and immediately run the TNS query.

    Once construction completes the four result sets (sources,
    photometry, spectra and related files) are available on the
    instance, both as raw lists and as ``list_of_dictionaries`` wrappers.
    """
    self.log = log
    log.debug("instansiating a new 'search' object")
    self.settings = settings
    self.ra = ra
    self.dec = dec
    self.radiusArcsec = radiusArcsec
    self.comments = comments
    self.name = name
    self.internal_name = ""
    self.discInLastDays = discInLastDays
    self.page = 0
    self.batchSize = 1000

    # BUILD THE DISCOVERY-DATE WINDOW TO SEARCH THE TNS
    # (EMPTY STRINGS MEAN NO DATE CONSTRAINT)
    if discInLastDays:
        windowDays = int(discInLastDays)
        self.end = (datetime.now() + timedelta(days=1)).strftime("%Y-%m-%d")
        self.start = (
            datetime.now() - timedelta(days=windowDays)).strftime("%Y-%m-%d")
    else:
        self.start = ""
        self.end = ""

    # A TNS-STYLE NAME (E.G. 'SN 2016asf') IS KEPT IN `name`; ANYTHING
    # ELSE IS TREATED AS A SURVEY-INTERNAL NAME
    if self.name:
        tnsNameMatch = re.match(r'^((SN|AT) ?)?(\d{4}\w{1,6})', self.name)
        if tnsNameMatch:
            self.name = tnsNameMatch.group(3)
        else:
            self.internal_name, self.name = self.name, ""

    # RUN THE TNS QUERY AND WRAP EACH RESULT SET
    (self.sourceResultsList, self.photResultsList,
     self.specResultsList, self.relatedFilesResultsList) = self._query_tns()
    self.sourceResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.sourceResultsList
    )
    self.photResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.photResultsList
    )
    self.specResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.specResultsList
    )
    self.relatedFilesResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.relatedFilesResultsList
    )
@property
def sources(
        self):
    """*The matched transient sources as a python list of dictionaries*

    **Usage:**
    .. code-block:: python

        sources = tns.sources
    """
    # RETURN INDEPENDENT DICT COPIES SO CALLERS CANNOT MUTATE THE CACHE
    return [dict(row) for row in self.sourceResultsList]
@property
def spectra(
        self):
    """*The spectral data associated with the matched sources*

    **Usage:**
    .. code-block:: python

        sourceSpectra = tns.spectra
    """
    # RETURN INDEPENDENT DICT COPIES SO CALLERS CANNOT MUTATE THE CACHE
    return [dict(row) for row in self.specResultsList]
# BUGFIX: this property was previously decorated with `@property` twice.
# The second decoration wrapped the property object itself, so accessing
# `tns.photometry` raised a TypeError (property objects are not callable).
@property
def photometry(
        self):
    """*The photometry associated with the matched sources*

    **Return:**
        - a list of dictionaries, one per photometry point

    **Usage:**
    .. code-block:: python

        sourcePhotometry = tns.photometry
    """
    photResultsList = []
    photResultsList[:] = [dict(l) for l in self.photResultsList]
    return photResultsList
@property
def url(
        self):
    """*The URL that was generated to search the TNS*

    **Usage:**
    .. code-block:: python

        searchURL = tns.url
    """
    # `_searchURL` is recorded by `_query_tns` when the search runs
    return self._searchURL
def csv(
        self,
        dirPath=None):
    """*Render the results in csv format*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `csvSources` -- the top-level transient data
        - `csvPhot` -- all photometry associated with the transients
        - `csvSpec` -- all spectral data associated with the transients
        - `csvFiles` -- all files associated with the matched transients found on the tns

    **Usage:**
    .. code-block:: python

        csvSources, csvPhot, csvSpec, csvFiles = tns.csv()
        print(csvSources)

    Pass a directory path (e.g. ``tns.csv("~/tns")``) to also write the
    four result sets to separate csv files; rows in every file can be
    joined back to their transient via the unique `TNSId` column.
    """
    # IN-MEMORY RENDER ONLY WHEN NO DIRECTORY IS SUPPLIED
    if not dirPath:
        return (self.sourceResults.csv(),
                self.photResults.csv(),
                self.specResults.csv(),
                self.relatedFilesResults.csv())
    # OTHERWISE ALSO WRITE EACH FLAVOUR OF DATA TO ITS OWN FILE
    p = self._file_prefix()
    csvSources = self.sourceResults.csv(
        filepath=dirPath + "/" + p + "sources.csv")
    csvPhot = self.photResults.csv(
        filepath=dirPath + "/" + p + "phot.csv")
    csvSpec = self.specResults.csv(
        filepath=dirPath + "/" + p + "spec.csv")
    csvFiles = self.relatedFilesResults.csv(
        filepath=dirPath + "/" + p + "relatedFiles.csv")
    return csvSources, csvPhot, csvSpec, csvFiles
def json(
        self,
        dirPath=None):
    """*Render the results in json format*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `jsonSources` -- the top-level transient data
        - `jsonPhot` -- all photometry associated with the transients
        - `jsonSpec` -- all spectral data associated with the transients
        - `jsonFiles` -- all files associated with the matched transients found on the tns

    **Usage:**
    .. code-block:: python

        jsonSources, jsonPhot, jsonSpec, jsonFiles = tns.json()
        print(jsonSources)

    Pass a directory path (e.g. ``tns.json("~/tns")``) to also write the
    four result sets to separate json files; records in every file can
    be joined back to their transient via the unique `TNSId` key.
    """
    # IN-MEMORY RENDER ONLY WHEN NO DIRECTORY IS SUPPLIED
    if not dirPath:
        return (self.sourceResults.json(),
                self.photResults.json(),
                self.specResults.json(),
                self.relatedFilesResults.json())
    # OTHERWISE ALSO WRITE EACH FLAVOUR OF DATA TO ITS OWN FILE
    p = self._file_prefix()
    jsonSources = self.sourceResults.json(
        filepath=dirPath + "/" + p + "sources.json")
    jsonPhot = self.photResults.json(
        filepath=dirPath + "/" + p + "phot.json")
    jsonSpec = self.specResults.json(
        filepath=dirPath + "/" + p + "spec.json")
    jsonFiles = self.relatedFilesResults.json(
        filepath=dirPath + "/" + p + "relatedFiles.json")
    return jsonSources, jsonPhot, jsonSpec, jsonFiles
def yaml(
        self,
        dirPath=None):
    """*Render the results in yaml format*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `yamlSources` -- the top-level transient data
        - `yamlPhot` -- all photometry associated with the transients
        - `yamlSpec` -- all spectral data associated with the transients
        - `yamlFiles` -- all files associated with the matched transients found on the tns

    **Usage:**
    .. code-block:: python

        yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml()
        print(yamlSources)

    Pass a directory path (e.g. ``tns.yaml("~/tns")``) to also write the
    four result sets to separate yaml files; records in every file can
    be joined back to their transient via the unique `TNSId` key.
    """
    # IN-MEMORY RENDER ONLY WHEN NO DIRECTORY IS SUPPLIED
    if not dirPath:
        return (self.sourceResults.yaml(),
                self.photResults.yaml(),
                self.specResults.yaml(),
                self.relatedFilesResults.yaml())
    # OTHERWISE ALSO WRITE EACH FLAVOUR OF DATA TO ITS OWN FILE
    p = self._file_prefix()
    yamlSources = self.sourceResults.yaml(
        filepath=dirPath + "/" + p + "sources.yaml")
    yamlPhot = self.photResults.yaml(
        filepath=dirPath + "/" + p + "phot.yaml")
    yamlSpec = self.specResults.yaml(
        filepath=dirPath + "/" + p + "spec.yaml")
    yamlFiles = self.relatedFilesResults.yaml(
        filepath=dirPath + "/" + p + "relatedFiles.yaml")
    return yamlSources, yamlPhot, yamlSpec, yamlFiles
def markdown(
        self,
        dirPath=None):
    """*Render the results as markdown tables*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `markdownSources` -- the top-level transient data
        - `markdownPhot` -- all photometry associated with the transients
        - `markdownSpec` -- all spectral data associated with the transients
        - `markdownFiles` -- all files associated with the matched transients found on the tns

    **Usage:**
    .. code-block:: python

        markdownSources, markdownPhot, markdownSpec, markdownFiles = tns.markdown()
        print(markdownSources)

    Pass a directory path (e.g. ``tns.markdown("~/tns")``) to also write
    the four result sets to separate markdown files; rows in every file
    can be joined back to their transient via the unique `TNSId` column.
    """
    # IN-MEMORY RENDER ONLY WHEN NO DIRECTORY IS SUPPLIED
    if not dirPath:
        return (self.sourceResults.markdown(),
                self.photResults.markdown(),
                self.specResults.markdown(),
                self.relatedFilesResults.markdown())
    # OTHERWISE ALSO WRITE EACH FLAVOUR OF DATA TO ITS OWN FILE
    p = self._file_prefix()
    markdownSources = self.sourceResults.markdown(
        filepath=dirPath + "/" + p + "sources.md")
    markdownPhot = self.photResults.markdown(
        filepath=dirPath + "/" + p + "phot.md")
    markdownSpec = self.specResults.markdown(
        filepath=dirPath + "/" + p + "spec.md")
    markdownFiles = self.relatedFilesResults.markdown(
        filepath=dirPath + "/" + p + "relatedFiles.md")
    return markdownSources, markdownPhot, markdownSpec, markdownFiles
def table(
        self,
        dirPath=None):
    """*Render the results as ascii tables*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `tableSources` -- the top-level transient data
        - `tablePhot` -- all photometry associated with the transients
        - `tableSpec` -- all spectral data associated with the transients
        - `tableFiles` -- all files associated with the matched transients found on the tns

    **Usage:**
    .. code-block:: python

        tableSources, tablePhot, tableSpec, tableFiles = tns.table()
        print(tableSources)

    Pass a directory path (e.g. ``tns.table("~/tns")``) to also write
    the four result sets to separate ascii files; rows in every file can
    be joined back to their transient via the unique `TNSId` column.
    """
    # IN-MEMORY RENDER ONLY WHEN NO DIRECTORY IS SUPPLIED
    if not dirPath:
        return (self.sourceResults.table(),
                self.photResults.table(),
                self.specResults.table(),
                self.relatedFilesResults.table())
    # OTHERWISE ALSO WRITE EACH FLAVOUR OF DATA TO ITS OWN FILE
    p = self._file_prefix()
    tableSources = self.sourceResults.table(
        filepath=dirPath + "/" + p + "sources.ascii")
    tablePhot = self.photResults.table(
        filepath=dirPath + "/" + p + "phot.ascii")
    tableSpec = self.specResults.table(
        filepath=dirPath + "/" + p + "spec.ascii")
    tableFiles = self.relatedFilesResults.table(
        filepath=dirPath + "/" + p + "relatedFiles.ascii")
    return tableSources, tablePhot, tableSpec, tableFiles
def mysql(
        self,
        tableNamePrefix="TNS",
        dirPath=None):
    """*Render the results as MySQL Insert statements*

    **Key Arguments:**
        - ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `mysqlSources` -- the top-level transient data
        - `mysqlPhot` -- all photometry associated with the transients
        - `mysqlSpec` -- all spectral data associated with the transients
        - `mysqlFiles` -- all files associated with the matched transients found on the tns

    **Usage:**
    To render the results in mysql insert format:

    .. code-block:: python

        mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")

    You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.

    .. code-block:: python

        tns.mysql("TNS", "~/tns")
    """
    # WHEN A DIRECTORY PATH IS SUPPLIED, EACH RESULT SET IS WRITTEN TO
    # ITS OWN .sql FILE TOGETHER WITH A CREATE TABLE STATEMENT; THE
    # `%(tableNamePrefix)s` PLACEHOLDERS ARE FILLED VIA `% locals()`
    if dirPath:
        p = self._file_prefix()
        # SCHEMA FOR THE TOP-LEVEL SOURCES TABLE (ONE ROW PER TRANSIENT,
        # UNIQUE ON TNSId)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_sources` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(20) NOT NULL,
  `TNSName` varchar(20) DEFAULT NULL,
  `dateCreated` datetime DEFAULT NULL,
  `decDeg` double DEFAULT NULL,
  `decSex` varchar(45) DEFAULT NULL,
  `discDate` datetime DEFAULT NULL,
  `discMag` double DEFAULT NULL,
  `discMagFilter` varchar(45) DEFAULT NULL,
  `discSurvey` varchar(100) DEFAULT NULL,
  `discoveryName` varchar(100) DEFAULT NULL,
  `objectUrl` varchar(200) DEFAULT NULL,
  `raDeg` double DEFAULT NULL,
  `raSex` varchar(45) DEFAULT NULL,
  `specType` varchar(100) DEFAULT NULL,
  `transRedshift` double DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `hostName` VARCHAR(100) NULL DEFAULT NULL,
  `hostRedshift` DOUBLE NULL DEFAULT NULL,
  `survey` VARCHAR(100) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid` (`TNSId`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources", filepath=dirPath + "/" + p + "sources.sql", createStatement=createStatement)
        # SCHEMA FOR THE PHOTOMETRY TABLE (MANY ROWS PER TRANSIENT)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_photometry` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(20) NOT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `exptime` double DEFAULT NULL,
  `filter` varchar(100) DEFAULT NULL,
  `limitingMag` tinyint(4) DEFAULT NULL,
  `mag` double DEFAULT NULL,
  `magErr` double DEFAULT NULL,
  `magUnit` varchar(100) DEFAULT NULL,
  `objectName` varchar(100) DEFAULT NULL,
  `obsdate` datetime DEFAULT NULL,
  `reportAddedDate` datetime DEFAULT NULL,
  `suggestedType` varchar(100) DEFAULT NULL,
  `survey` varchar(100) DEFAULT NULL,
  `telescope` varchar(100) DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `remarks` VARCHAR(800) NULL DEFAULT NULL,
  `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
  UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),
  UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlPhot = self.photResults.mysql(
            tableNamePrefix + "_photometry", filepath=dirPath + "/" + p + "phot.sql", createStatement=createStatement)
        # SCHEMA FOR THE SPECTRA TABLE (MANY ROWS PER TRANSIENT)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_spectra` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(45) NOT NULL,
  `TNSuser` varchar(45) DEFAULT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `exptime` double DEFAULT NULL,
  `obsdate` datetime DEFAULT NULL,
  `reportAddedDate` datetime DEFAULT NULL,
  `specType` varchar(100) DEFAULT NULL,
  `survey` varchar(100) DEFAULT NULL,
  `telescope` varchar(100) DEFAULT NULL,
  `transRedshift` double DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `remarks` VARCHAR(800) NULL DEFAULT NULL,
  `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
  UNIQUE KEY `u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlSpec = self.specResults.mysql(
            tableNamePrefix + "_spectra", filepath=dirPath + "/" + p + "spec.sql", createStatement=createStatement)
        # SCHEMA FOR THE RELATED-FILES TABLE (MANY ROWS PER TRANSIENT)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_files` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(100) NOT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `dateObs` datetime DEFAULT NULL,
  `filename` varchar(200) DEFAULT NULL,
  `spec1phot2` tinyint(4) DEFAULT NULL,
  `url` varchar(800) DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `comment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid_url` (`TNSId`,`url`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files", filepath=dirPath + "/" + p + "relatedFiles.sql", createStatement=createStatement)
    else:
        # IN-MEMORY RENDER ONLY: NO CREATE STATEMENTS, NO FILES WRITTEN
        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources")
        mysqlPhot = self.photResults.mysql(tableNamePrefix + "_photometry")
        mysqlSpec = self.specResults.mysql(tableNamePrefix + "_spectra")
        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files")
    return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles
def _query_tns(self):
    """
    *determine how to query the TNS, send the query and parse the results*

    **Return:**
        - ``sourceTable``, ``photoTable``, ``specTable``, ``relatedFilesTable`` -- four lists of dictionaries (one result set each for sources, photometry, spectra and related files). All four are empty if the search fails or matches nothing.
    """
    self.log.info('starting the ``get`` method')
    sourceTable = []
    photoTable = []
    specTable = []
    relatedFilesTable = []
    # `stop` TRACKS THE TNS PAGINATION WHEN MANY RESULT PAGES ARE RETURNED
    stop = False
    sourceCount = 0
    while not stop:
        status_code, content, self._searchURL = self._get_tns_search_results()
        if status_code != 200:
            self.log.error(
                'could not get the search results from the TNS, HTML error code %(status_code)s ' % locals())
            # BUGFIX: previously returned None here, which crashed the
            # caller (it unpacks four result tables); return empty tables
            return sourceTable, photoTable, specTable, relatedFilesTable
        if "No results found" in content:
            print("No results found")
            return sourceTable, photoTable, specTable, relatedFilesTable
        # A FULL PAGE OF `batchSize` ROWS MEANS THERE MAY BE MORE PAGES
        if self._parse_transient_rows(content, True) < self.batchSize:
            stop = True
        else:
            self.page += 1
            thisPage = self.page
            print("Downloaded %(thisPage)s page(s) from the TNS. %(sourceCount)s transients parsed so far." % locals())
            sourceCount += self.batchSize
            print("\t" + self._searchURL)
            # BE POLITE TO THE TNS SERVER BETWEEN PAGE REQUESTS
            timesleep.sleep(1)
        # PARSE ALL ROWS RETURNED ON THIS PAGE
        for transientRow in self._parse_transient_rows(content):
            # TOP LEVEL DISCOVERY CONTENT
            sourceContent = transientRow.group()
            discInfo, TNSId = self._parse_discovery_information(
                sourceContent)
            sourceTable.append(discInfo)
            # PHOTOMETRY
            phot, relatedFiles = self._parse_photometry_data(
                sourceContent, TNSId)
            photoTable += phot
            relatedFilesTable += relatedFiles
            # SPECTRA
            spec, relatedFiles = self._parse_spectral_data(
                sourceContent, TNSId)
            specTable += spec
            relatedFilesTable += relatedFiles
    # SORT BY SEPARATION FROM THE SEARCH COORDINATES; NAME/DATE SEARCHES
    # HAVE NO `separationArcsec` COLUMN, SO SORTING IS BEST-EFFORT
    try:
        sourceTable = sorted(sourceTable, key=itemgetter(
            'separationArcsec'), reverse=False)
    except Exception:
        pass
    self.log.info('completed the ``get`` method')
    return sourceTable, photoTable, specTable, relatedFilesTable
def _get_tns_search_results(
        self):
    """
    *query the tns and return the response*

    **Return:**
        - ``status_code``, ``content``, ``url`` -- the HTTP status code, the raw page content and the final request URL

    **Raises:**
        - ``requests.exceptions.RequestException`` -- if the HTTP request itself fails (connection error, timeout, ...)
    """
    self.log.info('starting the ``_get_tns_search_results`` method')
    try:
        response = requests.get(
            url="http://wis-tns.weizmann.ac.il/search",
            params={
                "page": self.page,
                "ra": self.ra,
                "decl": self.dec,
                "radius": self.radiusArcsec,
                "name": self.name,
                "internal_name": self.internal_name,
                "date_start[date]": self.start,
                "date_end[date]": self.end,
                "num_page": self.batchSize,
                "display[redshift]": "1",
                "display[hostname]": "1",
                "display[host_redshift]": "1",
                "display[source_group_name]": "1",
                "display[internal_name]": "1",
                "display[spectra_count]": "1",
                "display[discoverymag]": "1",
                "display[discmagfilter]": "1",
                "display[discoverydate]": "1",
                "display[discoverer]": "1",
                "display[sources]": "1",
                "display[bibcode]": "1",
            },
        )
    except requests.exceptions.RequestException:
        # BUGFIX: previously execution fell through to the return below,
        # crashing with an UnboundLocalError on `response` and masking the
        # real network error -- re-raise instead
        print('HTTP Request failed')
        raise
    self.log.info('completed the ``_get_tns_search_results`` method')
    return response.status_code, response.content, response.url
def _file_prefix(
        self):
    """*Generate a file prefix based on the type of search for saving files to disk*

    **Return:**
        - ``prefix`` -- the file prefix
    """
    self.log.info('starting the ``_file_prefix`` method')
    if self.ra:
        now = datetime.now()
        prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_conesearch_")
    elif self.name:
        prefix = self.name + "_tns_conesearch_"
    elif self.internal_name:
        prefix = self.internal_name + "_tns_conesearch_"
    elif self.discInLastDays:
        discInLastDays = str(self.discInLastDays)
        now = datetime.now()
        prefix = now.strftime(
            discInLastDays + "d_since_%Y%m%d_tns_conesearch_")
    else:
        # BUGFIX: previously an unconstrained search reached the return
        # below with `prefix` unbound, raising an UnboundLocalError
        now = datetime.now()
        prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_search_")
    self.log.info('completed the ``_file_prefix`` method')
    return prefix
def _parse_transient_rows(
        self,
        content,
        count=False):
    """* parse transient rows from the TNS result page content*

    **Key Arguments:**
        - ``content`` -- the content from the TNS results page.
        - ``count`` -- if True, return only the number of rows found

    **Return:**
        - ``transientRows`` -- an iterator of regex matches, one per source row (or an int when ``count`` is True)
    """
    self.log.info('starting the ``_parse_transient_rows`` method')
    # EACH SOURCE ROW STARTS WITH AN OBJECT LINK; THE LOOKAHEAD ENDS THE
    # MATCH AT THE NEXT ROW OR AT THE END-OF-CONTENT MARKER
    regexForRow = r"""\n([^\n]*?<a href="/object/.*?)(?=\n[^\n]*?<a href="/object/|<\!\-\- /\.section, /#content \-\->)"""
    if count:
        # ONLY THE NUMBER OF SOURCE ROWS IS NEEDED
        return sum(1 for _ in re.finditer(regexForRow, content, flags=re.S))
    rowIterator = re.finditer(regexForRow, content, flags=re.S)
    self.log.info('completed the ``_parse_transient_rows`` method')
    return rowIterator
def _parse_discovery_information(
        self,
        content):
    """* parse discovery information from one row on the TNS results page*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page.

    **Return:**
        - ``discoveryData`` -- ordered dictionary of results for the first matched row
        - ``TNSId`` -- the unique TNS id for the transient
    """
    self.log.info('starting the ``_parse_discovery_information`` method')
    # ASTROCALC UNIT CONVERTER OBJECT
    converter = unit_conversion(
        log=self.log
    )
    matches = re.finditer(
        r"""<tr class="row-.*?"><td class="cell-id">(?P<tnsId>\d*?)</td><td class="cell-name"><a href="(?P<objectUrl>.*?)">(?P<TNSName>.*?)</a></td><td class="cell-.*?<td class="cell-ra">(?P<raSex>.*?)</td><td class="cell-decl">(?P<decSex>.*?)</td><td class="cell-ot_name">(?P<specType>.*?)</td><td class="cell-redshift">(?P<transRedshift>.*?)</td><td class="cell-hostname">(?P<hostName>.*?)</td><td class="cell-host_redshift">(?P<hostRedshift>.*?)</td><td class="cell-source_group_name">(?P<discSurvey>.*?)</td>.*?<td class="cell-internal_name">(<a.*?>)?(?P<discoveryName>.*?)(</a>)?</td>.*?<td class="cell-discoverymag">(?P<discMag>.*?)</td><td class="cell-disc_filter_name">(?P<discMagFilter>.*?)</td><td class="cell-discoverydate">(?P<discDate>.*?)</td><td class="cell-discoverer">(?P<sender>.*?)</td>.*?</tr>""",
        content,
        flags=0  # re.S
    )
    discoveryData = []
    for match in matches:
        row = match.groupdict()
        # STRIP WHITESPACE FROM EVERY CELL; EMPTY CELLS BECOME None
        # (py2/py3 compatible: `items()` instead of `iteritems()`)
        for k, v in row.items():
            row[k] = v.strip()
            # BUGFIX: previously tested `len(v) == 0` on the PRE-strip
            # value, so whitespace-only cells stayed as "" instead of None
            if len(row[k]) == 0:
                row[k] = None
        if row["transRedshift"] == 0:
            # NOTE(review): cell values are strings (or None), so this
            # comparison with int 0 can never be true -- possibly
            # intended to be == "0"; confirm before changing
            row["transRedshift"] = None
        # TNS NAMES STARTING WITH A YEAR GET THE 'SN' PREFIX
        if row["TNSName"][0] in ["1", "2"]:
            row["TNSName"] = "SN" + row["TNSName"]
        row["objectUrl"] = "http://wis-tns.weizmann.ac.il" + \
            row["objectUrl"]
        # CONVERT COORDINATES TO DECIMAL DEGREES
        row["raDeg"] = converter.ra_sexegesimal_to_decimal(
            ra=row["raSex"]
        )
        row["decDeg"] = converter.dec_sexegesimal_to_decimal(
            dec=row["decSex"]
        )
        # IF THIS IS A COORDINATE SEARCH, ADD SEPARATION FROM
        # ORIGINAL QUERY COORDINATES
        if self.ra:
            # CALCULATE SEPARATION IN ARCSEC
            from astrocalc.coords import separations
            calculator = separations(
                log=self.log,
                ra1=self.ra,
                dec1=self.dec,
                ra2=row["raDeg"],
                dec2=row["decDeg"],
            )
            angularSeparation, north, east = calculator.get()
            row["separationArcsec"] = angularSeparation
            row["separationNorthArcsec"] = north
            row["separationEastArcsec"] = east
        if not row["discSurvey"]:
            row["survey"] = row["sender"]
        del row["sender"]
        del row["tnsId"]
        row["TNSName"] = row["TNSName"].replace(" ", "")
        row["TNSId"] = row["TNSName"].replace(
            "SN", "").replace("AT", "")
        TNSId = row["TNSId"]
        # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
        orow = collections.OrderedDict()
        keyOrder = ["TNSId", "TNSName", "discoveryName", "discSurvey", "raSex", "decSex", "raDeg", "decDeg",
                    "transRedshift", "specType", "discMag", "discMagFilter", "discDate", "objectUrl", "hostName", "hostRedshift", "separationArcsec", "separationNorthArcsec", "separationEastArcsec"]
        for k in row.keys():
            if k not in keyOrder:
                keyOrder.append(k)
        for k in keyOrder:
            try:
                orow[k] = row[k]
            except KeyError:
                self.log.info(
                    "`%(k)s` not found in the source data for %(TNSId)s" % locals())
        # BUGFIX: previously the unordered `row` was appended and the
        # carefully ordered `orow` was discarded
        discoveryData.append(orow)
    # NOTE(review): if the regex matches nothing, `TNSId` is unbound and
    # `discoveryData[0]` raises -- callers currently always supply a row
    self.log.info('completed the ``_parse_discovery_information`` method')
    return discoveryData[0], TNSId
def _parse_photometry_data(
        self,
        content,
        TNSId):
    """*parse photometry data from a row in the tns results content*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page
        - ``TNSId`` -- the tns id of the transient

    **Return:**
        - ``photData`` -- a list of dictionaries of the photometry data
        - ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files
    """
    self.log.info('starting the ``_parse_photometry_data`` method')

    photData = []
    relatedFilesTable = []

    # AT REPORT BLOCK
    # ISOLATE THE "AT reportings" SECTION OF THE PAGE -- EVERYTHING UP TO
    # THE "Classification reportings" SECTION (OR THE END OF THE CONTENT)
    ATBlock = re.search(
        r"""<tr class=[^\n]*?AT reportings.*?(?=<tr class=[^\n]*?Classification reportings|$)""",
        content,
        flags=re.S  # re.S
    )

    if ATBlock:
        ATBlock = ATBlock.group()
        # EACH INDIVIDUAL AT REPORT WITHIN THE BLOCK
        reports = re.finditer(
            r"""<tr class="row-[^"]*"><td class="cell-id">.*?</table>""",
            ATBlock,
            flags=re.S  # re.S
        )
        relatedFiles = self._parse_related_files(ATBlock)

        for r in reports:
            # REPORT-LEVEL (HEADER) METADATA -- SENDER, SURVEY, COMMENTS ...
            header = re.search(
                r"""<tr class="row[^"]*".*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<sender>[^<]*).*?reporter_name">(?P<reporters>[^<]*).*?source_group_name">(?P<surveyGroup>[^<]*).*?ra">(?P<ra>[^<]*).*?decl">(?P<dec>[^<]*).*?discovery_date">(?P<obsDate>[^<]*).*?flux">(?P<mag>[^<]*).*?filter_name">(?P<magFilter>[^<]*).*?related_files">(?P<relatedFiles>[^<]*).*?type_name">(?P<suggestedType>[^<]*).*?hostname">(?P<hostName>[^<]*).*?host_redshift">(?P<hostRedshift>[^<]*).*?internal_name">(?P<objectName>[^<]*).*?groups">(?P<survey>[^<]*).*?remarks">(?P<sourceComment>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )
            try:
                header = header.groupdict()
            except:
                # NOTE(review): if the header regex does not match, the raw
                # report is printed for debugging and the unmatched `header`
                # is reused below -- pre-existing py2-era behaviour
                print r.group()
            header["TNSId"] = TNSId

            # DROP HEADER FIELDS THAT ARE DUPLICATED (OR SUPERSEDED) BY THE
            # PER-OBSERVATION PHOTOMETRY ROWS
            del header["reporters"]
            del header["surveyGroup"]
            del header["hostName"]
            del header["hostRedshift"]
            del header["mag"]
            del header["magFilter"]
            del header["obsDate"]
            del header["ra"]
            del header["dec"]

            if not self.comments:
                del header['sourceComment']
            else:
                # FLATTEN MULTI-LINE COMMENTS TO ONE LINE, SWAP DOUBLE
                # QUOTES FOR SINGLE AND TRUNCATE TO 750 CHARACTERS
                theseComments = header[
                    "sourceComment"].split("\n")
                header["sourceComment"] = ""
                for c in theseComments:
                    header["sourceComment"] += " " + c.strip()
                header["sourceComment"] = header[
                    "sourceComment"].strip().replace('"', "'")[0:750]

            # INDIVIDUAL PHOTOMETRY POINTS WITHIN THIS REPORT
            phot = re.finditer(
                r"""<tr class="row\-[^"]*".*?obsdate">(?P<obsdate>[^<]*).*?flux">(?P<mag>[^<]*).*?fluxerr">(?P<magErr>[^<]*).*?limflux">(?P<limitingMag>[^<]*).*?unit_name">(?P<magUnit>[^<]*).*?filter_name">(?P<filter>[^<]*).*?tel_inst">(?P<telescope>[^<]*).*?exptime">(?P<exptime>[^<]*).*?observer">(?P<observer>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )

            filesAppended = False
            for p in phot:
                p = p.groupdict()
                del p["observer"]

                # A NON-DETECTION ROW HAS `limflux` SET BUT NO `flux`; MOVE
                # THE LIMIT INTO `mag` AND TURN `limitingMag` INTO A 1/0 FLAG
                if p["limitingMag"] and not p["mag"]:
                    p["mag"] = p["limitingMag"]
                    p["limitingMag"] = 1
                    p["remarks"] = p["remarks"].replace(
                        "[Last non detection]", "")
                else:
                    p["limitingMag"] = 0

                if not self.comments:
                    del p["remarks"]
                p.update(header)

                # APPEND THE REPORT'S RELATED FILES ONCE ONLY, STAMPED WITH
                # THE FIRST OBSERVATION DATE SEEN
                if p["relatedFiles"] and filesAppended == False:
                    filesAppended = True
                    for f in relatedFiles:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = f[
                            "filepath"].split("/")[-1]
                        thisFile["url"] = f["filepath"]
                        if self.comments:
                            thisFile["comment"] = f[
                                "fileComment"].replace("\n", " ").strip().replace('"', "'")[0:750]
                        thisFile["dateObs"] = p["obsdate"]
                        # 2 == PHOTOMETRY-ASSOCIATED FILE (1 == SPECTRUM)
                        thisFile["spec1phot2"] = 2
                        relatedFilesTable.append(thisFile)

                # FALL BACK TO THE TNS SENDER WHEN NEITHER A SURVEY NOR AN
                # INTERNAL OBJECT NAME WAS REPORTED
                if not p["survey"] and not p["objectName"]:
                    p["survey"] = p["sender"]
                del p["relatedFiles"]
                del p["sender"]

                # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                orow = collections.OrderedDict()
                keyOrder = ["TNSId", "survey", "obsdate", "filter", "limitingMag", "mag", "magErr",
                            "magUnit", "suggestedType", "telescope", "exptime", "reportAddedDate"]
                for k, v in p.iteritems():
                    if k not in keyOrder:
                        keyOrder.append(k)
                for k in keyOrder:
                    try:
                        orow[k] = p[k]
                    except:
                        self.log.info(
                            "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                        pass
                photData.append(orow)

    self.log.info('completed the ``_parse_photometry_data`` method')
    return photData, relatedFilesTable
def _parse_related_files(
self,
content):
"""*parse the contents for related files URLs and comments*
**Key Arguments:**
- ``content`` -- the content to parse.
**Return:**
- ``relatedFiles`` -- a list of dictionaries of transient related files
"""
self.log.info('starting the ``_parse_related_files`` method')
relatedFilesList = re.finditer(
r"""<td class="cell-filename">.*?href="(?P<filepath>[^"]*).*?remarks">(?P<fileComment>[^<]*)""",
content,
flags=0 # re.S
)
relatedFiles = []
for f in relatedFilesList:
f = f.groupdict()
relatedFiles.append(f)
self.log.info('completed the ``_parse_related_files`` method')
return relatedFiles
def _parse_spectral_data(
        self,
        content,
        TNSId):
    """*parse spectra data from a row in the tns results content*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page
        - ``TNSId`` -- the tns id of the transient

    **Return:**
        - ``specData`` -- a list of dictionaries of the spectral data
        - ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files
    """
    self.log.info('starting the ``_parse_spectral_data`` method')

    specData = []
    relatedFilesTable = []

    # CLASSIFICATION BLOCK -- EVERYTHING FROM "Classification reportings"
    # TO THE END OF THE CONTENT
    classBlock = re.search(
        r"""<tr class=[^\n]*?Classification reportings.*$""",
        content,
        flags=re.S  # re.S
    )

    if classBlock:
        classBlock = classBlock.group()
        # EACH INDIVIDUAL CLASSIFICATION REPORT WITHIN THE BLOCK
        reports = re.finditer(
            r"""<tr class="row-[^"]*"><td class="cell-id">.*?</tbody>\s*</table>\s*</div></td> </tr>\s*</tbody>\s*</table>\s*</div></td> </tr>""",
            classBlock,
            flags=re.S  #
        )
        relatedFiles = self._parse_related_files(classBlock)

        for r in reports:
            # REPORT-LEVEL (HEADER) METADATA -- CLASSIFIER, TYPE, REDSHIFT ...
            header = re.search(
                r"""<tr class="row.*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<TNSuser>[^<]*).*?classifier_name">(?P<reporters>[^<]*).*?source_group_name">(?P<survey>[^<]*).*?-type">(?P<specType>[^<]*).*?-redshift">(?P<transRedshift>[^<]*).*?-related_files">(?P<relatedFiles>[^<]*).*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<sourceComment>[^<]*)</td>""",
                r.group(),
                flags=re.S  # re.S
            )
            # SKIP REPORTS WHOSE HEADER CANNOT BE PARSED
            if not header:
                continue
            header = header.groupdict()
            header["TNSId"] = TNSId
            del header["reporters"]
            del header["surveyGroup"]
            del header["survey"]

            if not self.comments:
                del header['sourceComment']
            else:
                # FLATTEN MULTI-LINE COMMENTS TO ONE LINE, SWAP DOUBLE
                # QUOTES FOR SINGLE AND TRUNCATE TO 750 CHARACTERS
                theseComments = header[
                    "sourceComment"].split("\n")
                header["sourceComment"] = ""
                for c in theseComments:
                    header["sourceComment"] += " " + c.strip()
                header["sourceComment"] = header[
                    "sourceComment"].strip().replace('"', "'")[0:750]

            # INDIVIDUAL SPECTRA WITHIN THIS REPORT (ASCII AND FITS FILE
            # LINKS ARE OPTIONAL GROUPS)
            spec = re.finditer(
                r"""<tr class="class-results-.*?-obsdate">(?P<obsdate>[^<]*).*?-tel_inst">(?P<telescope>[^<]*).*?-exptime">(?P<exptime>[^<]*).*?-observer">(?P<sender>[^<]*).*?-reducer">(?P<reducer>[^<]*).*?-source_group_name">(?P<survey>[^<]*).*?-asciifile">(.*?<a href="(?P<filepath>[^"]*)".*?</a>)?.*?-fitsfile">(.*?<a href="(?P<fitsFilepath>[^"]*)".*?</a>)?.*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )

            filesAppended = False
            for s in spec:
                s = s.groupdict()
                del s["sender"]
                del s["surveyGroup"]
                del s["reducer"]
                if not self.comments:
                    del s["remarks"]
                else:
                    s["remarks"] = s["remarks"].replace('"', "'")[0:750]
                s.update(header)

                # APPEND THE REPORT'S RELATED FILES ONCE ONLY
                if s["relatedFiles"] and filesAppended == False:
                    filesAppended = True
                    for f in relatedFiles:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = f[
                            "filepath"].split("/")[-1]
                        thisFile["url"] = f["filepath"]
                        if self.comments:
                            thisFile["comment"] = f[
                                "fileComment"].replace("\n", " ").strip()
                        thisFile["dateObs"] = s["obsdate"]
                        # 1 == SPECTRUM-ASSOCIATED FILE (2 == PHOTOMETRY)
                        thisFile["spec1phot2"] = 1
                        relatedFilesTable.append(thisFile)

                # ALSO RECORD THE ASCII AND FITS SPECTRUM FILES THEMSELVES
                for ffile in [s["filepath"], s["fitsFilepath"]]:
                    if ffile:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = ffile.split(
                            "/")[-1]
                        thisFile["url"] = ffile
                        if self.comments:
                            thisFile["comment"] = ""
                        thisFile["dateObs"] = s["obsdate"]
                        thisFile["spec1phot2"] = 1
                        relatedFilesTable.append(thisFile)

                del s["filepath"]
                del s["fitsFilepath"]
                del s["relatedFiles"]

                # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                orow = collections.OrderedDict()
                keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
                            "telescope", "exptime", "reportAddedDate", "TNSuser"]
                for k, v in s.iteritems():
                    if k not in keyOrder:
                        keyOrder.append(k)
                for k in keyOrder:
                    try:
                        orow[k] = s[k]
                    except:
                        self.log.info(
                            "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                        pass
                specData.append(orow)

    self.log.info('completed the ``_parse_spectral_data`` method')
    return specData, relatedFilesTable
|
thespacedoctor/transientNamer
|
transientNamer/search.py
|
search.photometry
|
python
|
def photometry(
self):
photResultsList = []
photResultsList[:] = [dict(l) for l in self.photResultsList]
return photResultsList
|
*The associated source photometry*
**Usage:**
.. code-block:: python
sourcePhotometry = tns.photometry
|
train
|
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L198-L210
| null |
class search():
"""
*The worker class for the transient namer search module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``ra`` -- RA of the location being checked
- ``dec`` -- DEC of the location being searched
- ``radiusArcsec`` - the radius of the conesearch to perform against the TNS
- ``name`` -- name of the object to search the TNS for
- ``discInLastDays`` -- search the TNS for transient discovered in the last X days
- ``comments`` -- print the comments from the TNS, note these can be long making table outputs somewhat unreadable. Default *False*
**Usage:**
To initiate a search object to search the TNS via an object name (either TNS or survey names accepted):
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
name="Gaia16bbi"
)
or for a conesearch use something similar to:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
ra="06:50:36.74",
dec="+31:06:44.7",
radiusArcsec=5
)
    Note the search method can accept coordinates in sexagesimal or decimal degree formats.
To list all new objects discovered in the last three weeks, then use:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
discInLastDays=21
)
"""
# Initialisation
def __init__(
        self,
        log,
        ra="",
        dec="",
        radiusArcsec="",
        name="",
        discInLastDays="",
        settings=False,
        comments=False
):
    """*Set up the search parameters, query the TNS and parse the results into result sets*

    See the class docstring for a description of the arguments.
    """
    self.log = log
    log.debug("instansiating a new 'search' object")
    self.settings = settings
    self.ra = ra
    self.dec = dec
    self.radiusArcsec = radiusArcsec
    self.comments = comments
    self.name = name
    self.internal_name = ""
    self.discInLastDays = discInLastDays
    # TNS PAGINATION STATE -- RESULTS ARE FETCHED IN PAGES OF `batchSize`
    self.page = 0
    self.batchSize = 1000

    # CREATE THE TIME-RANGE WINDOW TO SEARCH TNS
    if not discInLastDays:
        self.start = ""
        self.end = ""
    else:
        # WINDOW RUNS FROM `discInLastDays` AGO UP TO TOMORROW
        discInLastDays = int(discInLastDays)
        td = timedelta(days=1)
        end = datetime.now() + td
        self.end = end.strftime("%Y-%m-%d")
        td = timedelta(days=discInLastDays)
        start = datetime.now() - td
        self.start = start.strftime("%Y-%m-%d")

    # DETERMINE IF WE HAVE A TNS OR INTERNAL SURVEY NAME
    if self.name:
        # TNS NAMES LOOK LIKE "SN 2016asf", "AT2016asf" OR BARE "2016asf"
        matchObject = re.match(r'^((SN|AT) ?)?(\d{4}\w{1,6})', self.name)
        if matchObject:
            self.name = matchObject.group(3)
        else:
            # ANYTHING ELSE IS TREATED AS A SURVEY-INTERNAL NAME
            self.internal_name = self.name
            self.name = ""

    # DO THE SEARCH OF THE TNS AND COMPILE THE RESULTS INTO SEPARATE RESULT
    # SETS
    self.sourceResultsList, self.photResultsList, self.specResultsList, self.relatedFilesResultsList = self._query_tns()

    self.sourceResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.sourceResultsList
    )
    self.photResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.photResultsList
    )
    self.specResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.specResultsList
    )
    self.relatedFilesResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.relatedFilesResultsList
    )

    return None
@property
def sources(
self):
"""*The results of the search returned as a python list of dictionaries*
**Usage:**
.. code-block:: python
sources = tns.sources
"""
sourceResultsList = []
sourceResultsList[:] = [dict(l) for l in self.sourceResultsList]
return sourceResultsList
@property
def spectra(
self):
"""*The associated source spectral data*
**Usage:**
.. code-block:: python
sourceSpectra = tns.spectra
"""
specResultsList = []
specResultsList[:] = [dict(l) for l in self.specResultsList]
return specResultsList
@property
def files(
self):
"""*The associated source files*
**Usage:**
.. code-block:: python
sourceFiles = tns.files
"""
relatedFilesResultsList = []
relatedFilesResultsList[:] = [dict(l)
for l in self.relatedFilesResultsList]
return relatedFilesResultsList
@property
@property
def url(
self):
"""*The generated URL used for searching of the TNS*
**Usage:**
.. code-block:: python
searchURL = tns.url
"""
return self._searchURL
def csv(
        self,
        dirPath=None):
    """*Render the results in csv format*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None* (render in memory only)

    **Return:**
        - ``csvSources`` -- the top-level transient data
        - ``csvPhot`` -- all photometry associated with the transients
        - ``csvSpec`` -- all spectral data associated with the transients
        - ``csvFiles`` -- all files associated with the matched transients found on the tns

    All four result sets can be cross-matched via the transient's unique ``TNSId``.
    """
    if not dirPath:
        # NO OUTPUT DIRECTORY GIVEN -- RENDER IN MEMORY ONLY
        return (self.sourceResults.csv(),
                self.photResults.csv(),
                self.specResults.csv(),
                self.relatedFilesResults.csv())

    # RENDER AND WRITE EACH RESULT SET TO ITS OWN PREFIXED FILE
    prefix = self._file_prefix()
    csvSources = self.sourceResults.csv(
        filepath=dirPath + "/" + prefix + "sources.csv")
    csvPhot = self.photResults.csv(
        filepath=dirPath + "/" + prefix + "phot.csv")
    csvSpec = self.specResults.csv(
        filepath=dirPath + "/" + prefix + "spec.csv")
    csvFiles = self.relatedFilesResults.csv(
        filepath=dirPath + "/" + prefix + "relatedFiles.csv")
    return csvSources, csvPhot, csvSpec, csvFiles
def json(
        self,
        dirPath=None):
    """*Render the results in json format*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None* (render in memory only)

    **Return:**
        - ``jsonSources`` -- the top-level transient data
        - ``jsonPhot`` -- all photometry associated with the transients
        - ``jsonSpec`` -- all spectral data associated with the transients
        - ``jsonFiles`` -- all files associated with the matched transients found on the tns

    All four result sets can be cross-matched via the transient's unique ``TNSId``.
    """
    if not dirPath:
        # NO OUTPUT DIRECTORY GIVEN -- RENDER IN MEMORY ONLY
        return (self.sourceResults.json(),
                self.photResults.json(),
                self.specResults.json(),
                self.relatedFilesResults.json())

    # RENDER AND WRITE EACH RESULT SET TO ITS OWN PREFIXED FILE
    prefix = self._file_prefix()
    jsonSources = self.sourceResults.json(
        filepath=dirPath + "/" + prefix + "sources.json")
    jsonPhot = self.photResults.json(
        filepath=dirPath + "/" + prefix + "phot.json")
    jsonSpec = self.specResults.json(
        filepath=dirPath + "/" + prefix + "spec.json")
    jsonFiles = self.relatedFilesResults.json(
        filepath=dirPath + "/" + prefix + "relatedFiles.json")
    return jsonSources, jsonPhot, jsonSpec, jsonFiles
def yaml(
        self,
        dirPath=None):
    """*Render the results in yaml format*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None* (render in memory only)

    **Return:**
        - ``yamlSources`` -- the top-level transient data
        - ``yamlPhot`` -- all photometry associated with the transients
        - ``yamlSpec`` -- all spectral data associated with the transients
        - ``yamlFiles`` -- all files associated with the matched transients found on the tns

    All four result sets can be cross-matched via the transient's unique ``TNSId``.
    """
    if not dirPath:
        # NO OUTPUT DIRECTORY GIVEN -- RENDER IN MEMORY ONLY
        return (self.sourceResults.yaml(),
                self.photResults.yaml(),
                self.specResults.yaml(),
                self.relatedFilesResults.yaml())

    # RENDER AND WRITE EACH RESULT SET TO ITS OWN PREFIXED FILE
    prefix = self._file_prefix()
    yamlSources = self.sourceResults.yaml(
        filepath=dirPath + "/" + prefix + "sources.yaml")
    yamlPhot = self.photResults.yaml(
        filepath=dirPath + "/" + prefix + "phot.yaml")
    yamlSpec = self.specResults.yaml(
        filepath=dirPath + "/" + prefix + "spec.yaml")
    yamlFiles = self.relatedFilesResults.yaml(
        filepath=dirPath + "/" + prefix + "relatedFiles.yaml")
    return yamlSources, yamlPhot, yamlSpec, yamlFiles
def markdown(
        self,
        dirPath=None):
    """*Render the results in markdown format*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None* (render in memory only)

    **Return:**
        - ``markdownSources`` -- the top-level transient data
        - ``markdownPhot`` -- all photometry associated with the transients
        - ``markdownSpec`` -- all spectral data associated with the transients
        - ``markdownFiles`` -- all files associated with the matched transients found on the tns

    All four result sets can be cross-matched via the transient's unique ``TNSId``.
    """
    if not dirPath:
        # NO OUTPUT DIRECTORY GIVEN -- RENDER IN MEMORY ONLY
        return (self.sourceResults.markdown(),
                self.photResults.markdown(),
                self.specResults.markdown(),
                self.relatedFilesResults.markdown())

    # RENDER AND WRITE EACH RESULT SET TO ITS OWN PREFIXED FILE
    prefix = self._file_prefix()
    markdownSources = self.sourceResults.markdown(
        filepath=dirPath + "/" + prefix + "sources.md")
    markdownPhot = self.photResults.markdown(
        filepath=dirPath + "/" + prefix + "phot.md")
    markdownSpec = self.specResults.markdown(
        filepath=dirPath + "/" + prefix + "spec.md")
    markdownFiles = self.relatedFilesResults.markdown(
        filepath=dirPath + "/" + prefix + "relatedFiles.md")
    return markdownSources, markdownPhot, markdownSpec, markdownFiles
def table(
        self,
        dirPath=None):
    """*Render the results as an ascii table*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None* (render in memory only)

    **Return:**
        - ``tableSources`` -- the top-level transient data
        - ``tablePhot`` -- all photometry associated with the transients
        - ``tableSpec`` -- all spectral data associated with the transients
        - ``tableFiles`` -- all files associated with the matched transients found on the tns

    All four result sets can be cross-matched via the transient's unique ``TNSId``.
    """
    if not dirPath:
        # NO OUTPUT DIRECTORY GIVEN -- RENDER IN MEMORY ONLY
        return (self.sourceResults.table(),
                self.photResults.table(),
                self.specResults.table(),
                self.relatedFilesResults.table())

    # RENDER AND WRITE EACH RESULT SET TO ITS OWN PREFIXED FILE
    prefix = self._file_prefix()
    tableSources = self.sourceResults.table(
        filepath=dirPath + "/" + prefix + "sources.ascii")
    tablePhot = self.photResults.table(
        filepath=dirPath + "/" + prefix + "phot.ascii")
    tableSpec = self.specResults.table(
        filepath=dirPath + "/" + prefix + "spec.ascii")
    tableFiles = self.relatedFilesResults.table(
        filepath=dirPath + "/" + prefix + "relatedFiles.ascii")
    return tableSources, tablePhot, tableSpec, tableFiles
def mysql(
        self,
        tableNamePrefix="TNS",
        dirPath=None):
    """*Render the results as MySQL Insert statements*

    **Key Arguments:**
        - ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None* (render in memory only)

    **Return:**
        - ``mysqlSources`` -- the top-level transient data
        - ``mysqlPhot`` -- all photometry associated with the transients
        - ``mysqlSpec`` -- all spectral data associated with the transients
        - ``mysqlFiles`` -- all files associated with the matched transients found on the tns

    When ``dirPath`` is given, a matching ``CREATE TABLE`` statement is
    written alongside the INSERTs for each of the four tables. All four
    result sets can be cross-matched via the transient's unique ``TNSId``.
    """
    if not dirPath:
        # NO OUTPUT DIRECTORY GIVEN -- RENDER IN MEMORY ONLY
        return (self.sourceResults.mysql(tableNamePrefix + "_sources"),
                self.photResults.mysql(tableNamePrefix + "_photometry"),
                self.specResults.mysql(tableNamePrefix + "_spectra"),
                self.relatedFilesResults.mysql(tableNamePrefix + "_files"))

    prefix = self._file_prefix()

    # SCHEMA FOR THE TOP-LEVEL SOURCES TABLE
    sourcesCreate = """
CREATE TABLE `%(tableNamePrefix)s_sources` (
    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
    `TNSId` varchar(20) NOT NULL,
    `TNSName` varchar(20) DEFAULT NULL,
    `dateCreated` datetime DEFAULT NULL,
    `decDeg` double DEFAULT NULL,
    `decSex` varchar(45) DEFAULT NULL,
    `discDate` datetime DEFAULT NULL,
    `discMag` double DEFAULT NULL,
    `discMagFilter` varchar(45) DEFAULT NULL,
    `discSurvey` varchar(100) DEFAULT NULL,
    `discoveryName` varchar(100) DEFAULT NULL,
    `objectUrl` varchar(200) DEFAULT NULL,
    `raDeg` double DEFAULT NULL,
    `raSex` varchar(45) DEFAULT NULL,
    `specType` varchar(100) DEFAULT NULL,
    `transRedshift` double DEFAULT NULL,
    `updated` tinyint(4) DEFAULT '0',
    `dateLastModified` datetime DEFAULT NULL,
    `hostName` VARCHAR(100) NULL DEFAULT NULL,
    `hostRedshift` DOUBLE NULL DEFAULT NULL,
    `survey` VARCHAR(100) NULL DEFAULT NULL,
    PRIMARY KEY (`primaryId`),
    UNIQUE KEY `tnsid` (`TNSId`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
    mysqlSources = self.sourceResults.mysql(
        tableNamePrefix + "_sources", filepath=dirPath + "/" + prefix + "sources.sql", createStatement=sourcesCreate)

    # SCHEMA FOR THE PHOTOMETRY TABLE
    photCreate = """
CREATE TABLE `%(tableNamePrefix)s_photometry` (
    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
    `TNSId` varchar(20) NOT NULL,
    `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
    `exptime` double DEFAULT NULL,
    `filter` varchar(100) DEFAULT NULL,
    `limitingMag` tinyint(4) DEFAULT NULL,
    `mag` double DEFAULT NULL,
    `magErr` double DEFAULT NULL,
    `magUnit` varchar(100) DEFAULT NULL,
    `objectName` varchar(100) DEFAULT NULL,
    `obsdate` datetime DEFAULT NULL,
    `reportAddedDate` datetime DEFAULT NULL,
    `suggestedType` varchar(100) DEFAULT NULL,
    `survey` varchar(100) DEFAULT NULL,
    `telescope` varchar(100) DEFAULT NULL,
    `updated` tinyint(4) DEFAULT '0',
    `dateLastModified` datetime DEFAULT NULL,
    `remarks` VARCHAR(800) NULL DEFAULT NULL,
    `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
    PRIMARY KEY (`primaryId`),
    UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
    UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),
    UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
    mysqlPhot = self.photResults.mysql(
        tableNamePrefix + "_photometry", filepath=dirPath + "/" + prefix + "phot.sql", createStatement=photCreate)

    # SCHEMA FOR THE SPECTRA TABLE
    specCreate = """
CREATE TABLE `%(tableNamePrefix)s_spectra` (
    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
    `TNSId` varchar(45) NOT NULL,
    `TNSuser` varchar(45) DEFAULT NULL,
    `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
    `exptime` double DEFAULT NULL,
    `obsdate` datetime DEFAULT NULL,
    `reportAddedDate` datetime DEFAULT NULL,
    `specType` varchar(100) DEFAULT NULL,
    `survey` varchar(100) DEFAULT NULL,
    `telescope` varchar(100) DEFAULT NULL,
    `transRedshift` double DEFAULT NULL,
    `updated` tinyint(4) DEFAULT '0',
    `dateLastModified` datetime DEFAULT NULL,
    `remarks` VARCHAR(800) NULL DEFAULT NULL,
    `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
    PRIMARY KEY (`primaryId`),
    UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
    UNIQUE KEY `u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
    mysqlSpec = self.specResults.mysql(
        tableNamePrefix + "_spectra", filepath=dirPath + "/" + prefix + "spec.sql", createStatement=specCreate)

    # SCHEMA FOR THE RELATED-FILES TABLE
    filesCreate = """
CREATE TABLE `%(tableNamePrefix)s_files` (
    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
    `TNSId` varchar(100) NOT NULL,
    `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
    `dateObs` datetime DEFAULT NULL,
    `filename` varchar(200) DEFAULT NULL,
    `spec1phot2` tinyint(4) DEFAULT NULL,
    `url` varchar(800) DEFAULT NULL,
    `updated` tinyint(4) DEFAULT '0',
    `dateLastModified` datetime DEFAULT NULL,
    `comment` VARCHAR(800) NULL DEFAULT NULL,
    PRIMARY KEY (`primaryId`),
    UNIQUE KEY `tnsid_url` (`TNSId`,`url`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
    mysqlFiles = self.relatedFilesResults.mysql(
        tableNamePrefix + "_files", filepath=dirPath + "/" + prefix + "relatedFiles.sql", createStatement=filesCreate)

    return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles
def _query_tns(self):
"""
*determine how to query the TNS, send query and parse the results*
**Return:**
- ``results`` -- a list of dictionaries (one dictionary for each result set returned from the TNS)
"""
self.log.info('starting the ``get`` method')
sourceTable = []
photoTable = []
specTable = []
relatedFilesTable = []
# THIS stop IS TO KEEP TRACK OF THE TNS PAGINATION IF MANY RESULT PAGES
# ARE RETURNED
stop = False
sourceCount = 0
while not stop:
status_code, content, self._searchURL = self._get_tns_search_results()
if status_code != 200:
self.log.error(
'cound not get the search reuslts from the TNS, HTML error code %(status_code)s ' % locals())
return None
if "No results found" in content:
print "No results found"
return sourceTable, photoTable, specTable, relatedFilesTable
if self._parse_transient_rows(content, True) < self.batchSize:
stop = True
else:
self.page += 1
thisPage = self.page
print "Downloaded %(thisPage)s page(s) from the TNS. %(sourceCount)s transients parsed so far." % locals()
sourceCount += self.batchSize
print "\t" + self._searchURL
timesleep.sleep(1)
# PARSE ALL ROWS RETURNED
for transientRow in self._parse_transient_rows(content):
# TOP LEVEL DISCOVERY CONTENT
sourceContent = transientRow.group()
discInfo, TNSId = self._parse_discovery_information(
sourceContent)
sourceTable.append(discInfo)
# PHOTOMETERY
phot, relatedFiles = self._parse_photometry_data(
sourceContent, TNSId)
photoTable += phot
relatedFilesTable += relatedFiles
# SPECTRA
spec, relatedFiles = self._parse_spectral_data(
sourceContent, TNSId)
specTable += spec
relatedFilesTable += relatedFiles
# SORT BY SEPARATION FROM THE SEARCH COORDINATES
try:
sourceTable = sorted(sourceTable, key=itemgetter(
'separationArcsec'), reverse=False)
except:
pass
self.log.info('completed the ``get`` method')
return sourceTable, photoTable, specTable, relatedFilesTable
def _get_tns_search_results(
self):
"""
*query the tns and result the response*
"""
self.log.info('starting the ``_get_tns_search_results`` method')
try:
response = requests.get(
url="http://wis-tns.weizmann.ac.il/search",
params={
"page": self.page,
"ra": self.ra,
"decl": self.dec,
"radius": self.radiusArcsec,
"name": self.name,
"internal_name": self.internal_name,
"date_start[date]": self.start,
"date_end[date]": self.end,
"num_page": self.batchSize,
"display[redshift]": "1",
"display[hostname]": "1",
"display[host_redshift]": "1",
"display[source_group_name]": "1",
"display[internal_name]": "1",
"display[spectra_count]": "1",
"display[discoverymag]": "1",
"display[discmagfilter]": "1",
"display[discoverydate]": "1",
"display[discoverer]": "1",
"display[sources]": "1",
"display[bibcode]": "1",
},
)
except requests.exceptions.RequestException:
print('HTTP Request failed')
self.log.info('completed the ``_get_tns_search_results`` method')
return response.status_code, response.content, response.url
def _file_prefix(
self):
"""*Generate a file prefix based on the type of search for saving files to disk*
**Return:**
- ``prefix`` -- the file prefix
"""
self.log.info('starting the ``_file_prefix`` method')
if self.ra:
now = datetime.now()
prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_conesearch_")
elif self.name:
prefix = self.name + "_tns_conesearch_"
elif self.internal_name:
prefix = self.internal_name + "_tns_conesearch_"
elif self.discInLastDays:
discInLastDays = str(self.discInLastDays)
now = datetime.now()
prefix = now.strftime(
discInLastDays + "d_since_%Y%m%d_tns_conesearch_")
self.log.info('completed the ``_file_prefix`` method')
return prefix
def _parse_transient_rows(
self,
content,
count=False):
"""* parse transient rows from the TNS result page content*
**Key Arguments:**
- ``content`` -- the content from the TNS results page.
- ``count`` -- return only the number of rows
**Return:**
- ``transientRows``
"""
self.log.info('starting the ``_parse_transient_rows`` method')
regexForRow = r"""\n([^\n]*?<a href="/object/.*?)(?=\n[^\n]*?<a href="/object/|<\!\-\- /\.section, /#content \-\->)"""
if count:
# A SINGLE SOURCE BLOCK
matchedSources = re.findall(
regexForRow,
content,
flags=re.S # re.S
)
return len(matchedSources)
# A SINGLE SOURCE BLOCK
matchedSources = re.finditer(
regexForRow,
content,
flags=re.S # re.S
)
self.log.info('completed the ``_parse_transient_rows`` method')
return matchedSources
def _parse_discovery_information(
self,
content):
"""* parse discovery information from one row on the TNS results page*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page.
**Return:**
- ``discoveryData`` -- dictionary of results
- ``TNSId`` -- the unique TNS id for the transient
"""
self.log.info('starting the ``_parse_discovery_information`` method')
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
matches = re.finditer(
r"""<tr class="row-.*?"><td class="cell-id">(?P<tnsId>\d*?)</td><td class="cell-name"><a href="(?P<objectUrl>.*?)">(?P<TNSName>.*?)</a></td><td class="cell-.*?<td class="cell-ra">(?P<raSex>.*?)</td><td class="cell-decl">(?P<decSex>.*?)</td><td class="cell-ot_name">(?P<specType>.*?)</td><td class="cell-redshift">(?P<transRedshift>.*?)</td><td class="cell-hostname">(?P<hostName>.*?)</td><td class="cell-host_redshift">(?P<hostRedshift>.*?)</td><td class="cell-source_group_name">(?P<discSurvey>.*?)</td>.*?<td class="cell-internal_name">(<a.*?>)?(?P<discoveryName>.*?)(</a>)?</td>.*?<td class="cell-discoverymag">(?P<discMag>.*?)</td><td class="cell-disc_filter_name">(?P<discMagFilter>.*?)</td><td class="cell-discoverydate">(?P<discDate>.*?)</td><td class="cell-discoverer">(?P<sender>.*?)</td>.*?</tr>""",
content,
flags=0 # re.S
)
discoveryData = []
for match in matches:
row = match.groupdict()
for k, v in row.iteritems():
row[k] = v.strip()
if len(v) == 0:
row[k] = None
if row["transRedshift"] == 0:
row["transRedshift"] = None
if row["TNSName"][0] in ["1", "2"]:
row["TNSName"] = "SN" + row["TNSName"]
row["objectUrl"] = "http://wis-tns.weizmann.ac.il" + \
row["objectUrl"]
# CONVERT COORDINATES TO DECIMAL DEGREES
row["raDeg"] = converter.ra_sexegesimal_to_decimal(
ra=row["raSex"]
)
row["decDeg"] = converter.dec_sexegesimal_to_decimal(
dec=row["decSex"]
)
# IF THIS IS A COORDINATE SEARCH, ADD SEPARATION FROM
# ORIGINAL QUERY COORDINATES
if self.ra:
# CALCULATE SEPARATION IN ARCSEC
from astrocalc.coords import separations
calculator = separations(
log=self.log,
ra1=self.ra,
dec1=self.dec,
ra2=row["raDeg"],
dec2=row["decDeg"],
)
angularSeparation, north, east = calculator.get()
row["separationArcsec"] = angularSeparation
row["separationNorthArcsec"] = north
row["separationEastArcsec"] = east
if not row["discSurvey"]:
row["survey"] = row["sender"]
del row["sender"]
del row["tnsId"]
row["TNSName"] = row["TNSName"].replace(" ", "")
row["TNSId"] = row["TNSName"].replace(
"SN", "").replace("AT", "")
TNSId = row["TNSId"]
# ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
orow = collections.OrderedDict()
keyOrder = ["TNSId", "TNSName", "discoveryName", "discSurvey", "raSex", "decSex", "raDeg", "decDeg",
"transRedshift", "specType", "discMag", "discMagFilter", "discDate", "objectUrl", "hostName", "hostRedshift", "separationArcsec", "separationNorthArcsec", "separationEastArcsec"]
for k, v in row.iteritems():
if k not in keyOrder:
keyOrder.append(k)
for k in keyOrder:
try:
orow[k] = row[k]
except:
self.log.info(
"`%(k)s` not found in the source data for %(TNSId)s" % locals())
pass
discoveryData.append(row)
self.log.info('completed the ``_parse_discovery_information`` method')
return discoveryData[0], TNSId
    def _parse_photometry_data(
            self,
            content,
            TNSId):
        """*parse photometry data from a row in the tns results content*

        **Key Arguments:**
            - ``content`` -- a table row from the TNS results page
            - ``TNSId`` -- the tns id of the transient

        **Return:**
            - ``photData`` -- a list of (ordered) dictionaries of the photometry data
            - ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files
        """
        self.log.info('starting the ``_parse_photometry_data`` method')

        photData = []
        relatedFilesTable = []

        # AT REPORT BLOCK - EVERYTHING BETWEEN THE "AT reportings" MARKER AND
        # EITHER THE "Classification reportings" MARKER OR END-OF-CONTENT
        ATBlock = re.search(
            r"""<tr class=[^\n]*?AT reportings.*?(?=<tr class=[^\n]*?Classification reportings|$)""",
            content,
            flags=re.S  # re.S
        )

        if ATBlock:
            ATBlock = ATBlock.group()
            # EACH REPORT IS ONE <tr>...</table> SPAN WITHIN THE AT BLOCK
            reports = re.finditer(
                r"""<tr class="row-[^"]*"><td class="cell-id">.*?</table>""",
                ATBlock,
                flags=re.S  # re.S
            )
            # FILES ATTACHED ANYWHERE IN THE AT BLOCK (URL + REMARK PAIRS)
            relatedFiles = self._parse_related_files(ATBlock)

            for r in reports:
                # REPORT-LEVEL METADATA SHARED BY EVERY PHOTOMETRY POINT BELOW
                header = re.search(
                    r"""<tr class="row[^"]*".*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<sender>[^<]*).*?reporter_name">(?P<reporters>[^<]*).*?source_group_name">(?P<surveyGroup>[^<]*).*?ra">(?P<ra>[^<]*).*?decl">(?P<dec>[^<]*).*?discovery_date">(?P<obsDate>[^<]*).*?flux">(?P<mag>[^<]*).*?filter_name">(?P<magFilter>[^<]*).*?related_files">(?P<relatedFiles>[^<]*).*?type_name">(?P<suggestedType>[^<]*).*?hostname">(?P<hostName>[^<]*).*?host_redshift">(?P<hostRedshift>[^<]*).*?internal_name">(?P<objectName>[^<]*).*?groups">(?P<survey>[^<]*).*?remarks">(?P<sourceComment>[^<]*)""",
                    r.group(),
                    flags=0  # re.S
                )
                try:
                    header = header.groupdict()
                except:
                    # NOTE(review): if the header regex fails, `header` stays a
                    # None and the following lines will raise - the raw report
                    # is dumped to stdout for debugging first
                    print r.group()
                header["TNSId"] = TNSId
                # DROP REPORT FIELDS DUPLICATED AT THE PER-POINT LEVEL
                del header["reporters"]
                del header["surveyGroup"]
                del header["hostName"]
                del header["hostRedshift"]
                del header["mag"]
                del header["magFilter"]
                del header["obsDate"]
                del header["ra"]
                del header["dec"]
                if not self.comments:
                    del header['sourceComment']
                else:
                    # FLATTEN MULTI-LINE COMMENTS AND CAP AT 750 CHARACTERS
                    # (DOUBLE QUOTES SWAPPED FOR SINGLE TO KEEP SQL/CSV SAFE)
                    theseComments = header[
                        "sourceComment"].split("\n")
                    header["sourceComment"] = ""
                    for c in theseComments:
                        header["sourceComment"] += " " + c.strip()
                    header["sourceComment"] = header[
                        "sourceComment"].strip().replace('"', "'")[0:750]

                # INDIVIDUAL PHOTOMETRY POINTS WITHIN THIS REPORT
                phot = re.finditer(
                    r"""<tr class="row\-[^"]*".*?obsdate">(?P<obsdate>[^<]*).*?flux">(?P<mag>[^<]*).*?fluxerr">(?P<magErr>[^<]*).*?limflux">(?P<limitingMag>[^<]*).*?unit_name">(?P<magUnit>[^<]*).*?filter_name">(?P<filter>[^<]*).*?tel_inst">(?P<telescope>[^<]*).*?exptime">(?P<exptime>[^<]*).*?observer">(?P<observer>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                    r.group(),
                    flags=0  # re.S
                )
                # ATTACH RELATED FILES TO ONLY THE FIRST POINT OF THE REPORT
                filesAppended = False
                for p in phot:
                    p = p.groupdict()
                    del p["observer"]
                    if p["limitingMag"] and not p["mag"]:
                        # A LIMITING MAGNITUDE (NON-DETECTION): STORE THE LIMIT
                        # IN `mag` AND FLAG WITH limitingMag=1
                        p["mag"] = p["limitingMag"]
                        p["limitingMag"] = 1
                        p["remarks"] = p["remarks"].replace(
                            "[Last non detection]", "")
                    else:
                        p["limitingMag"] = 0
                    if not self.comments:
                        del p["remarks"]
                    # MERGE THE SHARED REPORT METADATA INTO THIS POINT
                    p.update(header)

                    if p["relatedFiles"] and filesAppended == False:
                        filesAppended = True
                        for f in relatedFiles:
                            # ORDER THE DICTIONARY FOR THIS ROW OF
                            # RESULTS
                            thisFile = collections.OrderedDict()
                            thisFile["TNSId"] = TNSId
                            thisFile["filename"] = f[
                                "filepath"].split("/")[-1]
                            thisFile["url"] = f["filepath"]
                            if self.comments:
                                thisFile["comment"] = f[
                                    "fileComment"].replace("\n", " ").strip().replace('"', "'")[0:750]
                            thisFile["dateObs"] = p["obsdate"]
                            # 2 FLAGS A PHOTOMETRY-ASSOCIATED FILE (1 = SPECTRUM)
                            thisFile["spec1phot2"] = 2
                            relatedFilesTable.append(thisFile)

                    # FALL BACK TO THE SENDER WHEN NO SURVEY/NAME IS REPORTED
                    if not p["survey"] and not p["objectName"]:
                        p["survey"] = p["sender"]
                    del p["relatedFiles"]
                    del p["sender"]

                    # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                    orow = collections.OrderedDict()
                    keyOrder = ["TNSId", "survey", "obsdate", "filter", "limitingMag", "mag", "magErr",
                                "magUnit", "suggestedType", "telescope", "exptime", "reportAddedDate"]
                    for k, v in p.iteritems():
                        if k not in keyOrder:
                            keyOrder.append(k)
                    for k in keyOrder:
                        try:
                            orow[k] = p[k]
                        except:
                            # KEY DELETED ABOVE (E.G. COMMENT FIELDS) - SKIP
                            self.log.info(
                                "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                            pass
                    photData.append(orow)

        self.log.info('completed the ``_parse_photometry_data`` method')
        return photData, relatedFilesTable
def _parse_related_files(
self,
content):
"""*parse the contents for related files URLs and comments*
**Key Arguments:**
- ``content`` -- the content to parse.
**Return:**
- ``relatedFiles`` -- a list of dictionaries of transient related files
"""
self.log.info('starting the ``_parse_related_files`` method')
relatedFilesList = re.finditer(
r"""<td class="cell-filename">.*?href="(?P<filepath>[^"]*).*?remarks">(?P<fileComment>[^<]*)""",
content,
flags=0 # re.S
)
relatedFiles = []
for f in relatedFilesList:
f = f.groupdict()
relatedFiles.append(f)
self.log.info('completed the ``_parse_related_files`` method')
return relatedFiles
    def _parse_spectral_data(
            self,
            content,
            TNSId):
        """*parse spectra data from a row in the tns results content*

        **Key Arguments:**
            - ``content`` -- a table row from the TNS results page
            - ``TNSId`` -- the tns id of the transient

        **Return:**
            - ``specData`` -- a list of (ordered) dictionaries of the spectral data
            - ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files
        """
        self.log.info('starting the ``_parse_spectral_data`` method')

        specData = []
        relatedFilesTable = []

        # CLASSIFICATION BLOCK - EVERYTHING FROM THE "Classification
        # reportings" MARKER TO THE END OF THE ROW CONTENT
        classBlock = re.search(
            r"""<tr class=[^\n]*?Classification reportings.*$""",
            content,
            flags=re.S  # re.S
        )

        if classBlock:
            classBlock = classBlock.group()
            # ONE MATCH PER CLASSIFICATION REPORT WITHIN THE BLOCK
            reports = re.finditer(
                r"""<tr class="row-[^"]*"><td class="cell-id">.*?</tbody>\s*</table>\s*</div></td> </tr>\s*</tbody>\s*</table>\s*</div></td> </tr>""",
                classBlock,
                flags=re.S  #
            )
            # FILES ATTACHED ANYWHERE IN THE CLASSIFICATION BLOCK
            relatedFiles = self._parse_related_files(classBlock)

            for r in reports:
                # REPORT-LEVEL METADATA SHARED BY EVERY SPECTRUM BELOW
                header = re.search(
                    r"""<tr class="row.*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<TNSuser>[^<]*).*?classifier_name">(?P<reporters>[^<]*).*?source_group_name">(?P<survey>[^<]*).*?-type">(?P<specType>[^<]*).*?-redshift">(?P<transRedshift>[^<]*).*?-related_files">(?P<relatedFiles>[^<]*).*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<sourceComment>[^<]*)</td>""",
                    r.group(),
                    flags=re.S  # re.S
                )
                # SKIP MALFORMED REPORTS RATHER THAN RAISE
                if not header:
                    continue
                header = header.groupdict()
                header["TNSId"] = TNSId
                # DROP REPORT FIELDS DUPLICATED AT THE PER-SPECTRUM LEVEL
                del header["reporters"]
                del header["surveyGroup"]
                del header["survey"]
                if not self.comments:
                    del header['sourceComment']
                else:
                    # FLATTEN MULTI-LINE COMMENTS AND CAP AT 750 CHARACTERS
                    # (DOUBLE QUOTES SWAPPED FOR SINGLE TO KEEP SQL/CSV SAFE)
                    theseComments = header[
                        "sourceComment"].split("\n")
                    header["sourceComment"] = ""
                    for c in theseComments:
                        header["sourceComment"] += " " + c.strip()
                    header["sourceComment"] = header[
                        "sourceComment"].strip().replace('"', "'")[0:750]

                # INDIVIDUAL SPECTRA WITHIN THIS REPORT (ASCII AND FITS FILE
                # ANCHORS ARE OPTIONAL)
                spec = re.finditer(
                    r"""<tr class="class-results-.*?-obsdate">(?P<obsdate>[^<]*).*?-tel_inst">(?P<telescope>[^<]*).*?-exptime">(?P<exptime>[^<]*).*?-observer">(?P<sender>[^<]*).*?-reducer">(?P<reducer>[^<]*).*?-source_group_name">(?P<survey>[^<]*).*?-asciifile">(.*?<a href="(?P<filepath>[^"]*)".*?</a>)?.*?-fitsfile">(.*?<a href="(?P<fitsFilepath>[^"]*)".*?</a>)?.*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                    r.group(),
                    flags=0  # re.S
                )
                # ATTACH REPORT-LEVEL FILES TO ONLY THE FIRST SPECTRUM
                filesAppended = False
                for s in spec:
                    s = s.groupdict()
                    del s["sender"]
                    del s["surveyGroup"]
                    del s["reducer"]
                    if not self.comments:
                        del s["remarks"]
                    else:
                        s["remarks"] = s["remarks"].replace('"', "'")[0:750]
                    # MERGE THE SHARED REPORT METADATA INTO THIS SPECTRUM
                    s.update(header)

                    if s["relatedFiles"] and filesAppended == False:
                        filesAppended = True
                        for f in relatedFiles:
                            # ORDER THE DICTIONARY FOR THIS ROW OF
                            # RESULTS
                            thisFile = collections.OrderedDict()
                            thisFile["TNSId"] = TNSId
                            thisFile["filename"] = f[
                                "filepath"].split("/")[-1]
                            thisFile["url"] = f["filepath"]
                            if self.comments:
                                thisFile["comment"] = f[
                                    "fileComment"].replace("\n", " ").strip()
                            thisFile["dateObs"] = s["obsdate"]
                            # 1 FLAGS A SPECTRUM-ASSOCIATED FILE (2 = PHOTOMETRY)
                            thisFile["spec1phot2"] = 1
                            relatedFilesTable.append(thisFile)

                    # THE ASCII AND FITS SPECTRUM FILES THEMSELVES
                    for ffile in [s["filepath"], s["fitsFilepath"]]:
                        if ffile:
                            # ORDER THE DICTIONARY FOR THIS ROW OF
                            # RESULTS
                            thisFile = collections.OrderedDict()
                            thisFile["TNSId"] = TNSId
                            thisFile["filename"] = ffile.split(
                                "/")[-1]
                            thisFile["url"] = ffile
                            if self.comments:
                                thisFile["comment"] = ""
                            thisFile["dateObs"] = s["obsdate"]
                            thisFile["spec1phot2"] = 1
                            relatedFilesTable.append(thisFile)
                    del s["filepath"]
                    del s["fitsFilepath"]
                    del s["relatedFiles"]

                    # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                    orow = collections.OrderedDict()
                    keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
                                "telescope", "exptime", "reportAddedDate", "TNSuser"]
                    for k, v in s.iteritems():
                        if k not in keyOrder:
                            keyOrder.append(k)
                    for k in keyOrder:
                        try:
                            orow[k] = s[k]
                        except:
                            # KEY DELETED ABOVE (E.G. COMMENT FIELDS) - SKIP
                            self.log.info(
                                "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                            pass
                    specData.append(orow)

        self.log.info('completed the ``_parse_spectral_data`` method')
        return specData, relatedFilesTable
|
thespacedoctor/transientNamer
|
transientNamer/search.py
|
search.csv
|
python
|
def csv(
self,
dirPath=None):
if dirPath:
p = self._file_prefix()
csvSources = self.sourceResults.csv(
filepath=dirPath + "/" + p + "sources.csv")
csvPhot = self.photResults.csv(
filepath=dirPath + "/" + p + "phot.csv")
csvSpec = self.specResults.csv(
filepath=dirPath + "/" + p + "spec.csv")
csvFiles = self.relatedFilesResults.csv(
filepath=dirPath + "/" + p + "relatedFiles.csv")
else:
csvSources = self.sourceResults.csv()
csvPhot = self.photResults.csv()
csvSpec = self.specResults.csv()
csvFiles = self.relatedFilesResults.csv()
return csvSources, csvPhot, csvSpec, csvFiles
|
*Render the results in csv format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `csvSources` -- the top-level transient data
- `csvPhot` -- all photometry associated with the transients
- `csvSpec` -- all spectral data associated with the transients
- `csvFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in csv format:
.. code-block:: python
csvSources, csvPhot, csvSpec, csvFiles = tns.csv()
print csvSources
.. code-block:: text
TNSId,TNSName,discoveryName,discSurvey,raSex,decSex,raDeg,decDeg,transRedshift,specType,discMag,discMagFilter,discDate,objectUrl,hostName,hostRedshift,separationArcsec,separationNorthArcsec,separationEastArcsec
2016asf,SN2016asf,ASASSN-16cs,ASAS-SN,06:50:36.73,+31:06:45.36,102.6530,31.1126,0.021,SN Ia,17.1,V-Johnson,2016-03-06 08:09:36,http://wis-tns.weizmann.ac.il/object/2016asf,KUG 0647+311,,0.66,0.65,-0.13
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.csv("~/tns")
.. image:: https://i.imgur.com/BwwqMBg.png
:width: 800px
:alt: csv output
|
train
|
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L226-L280
|
[
"def _file_prefix(\n self):\n \"\"\"*Generate a file prefix based on the type of search for saving files to disk*\n\n **Return:**\n - ``prefix`` -- the file prefix\n \"\"\"\n self.log.info('starting the ``_file_prefix`` method')\n\n if self.ra:\n now = datetime.now()\n prefix = now.strftime(\"%Y%m%dt%H%M%S%f_tns_conesearch_\")\n elif self.name:\n prefix = self.name + \"_tns_conesearch_\"\n elif self.internal_name:\n prefix = self.internal_name + \"_tns_conesearch_\"\n elif self.discInLastDays:\n discInLastDays = str(self.discInLastDays)\n now = datetime.now()\n prefix = now.strftime(\n discInLastDays + \"d_since_%Y%m%d_tns_conesearch_\")\n\n self.log.info('completed the ``_file_prefix`` method')\n return prefix\n"
] |
class search():
"""
*The worker class for the transient namer search module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``ra`` -- RA of the location being checked
- ``dec`` -- DEC of the location being searched
- ``radiusArcsec`` - the radius of the conesearch to perform against the TNS
- ``name`` -- name of the object to search the TNS for
- ``discInLastDays`` -- search the TNS for transient discovered in the last X days
- ``comments`` -- print the comments from the TNS, note these can be long making table outputs somewhat unreadable. Default *False*
**Usage:**
To initiate a search object to search the TNS via an object name (either TNS or survey names accepted):
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
name="Gaia16bbi"
)
or for a conesearch use something similar to:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
ra="06:50:36.74",
dec="+31:06:44.7",
radiusArcsec=5
)
Note the search method can accept coordinates in sexagesimal or decimal degree formats.
To list all new objects discovered in the last three weeks, then use:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
discInLastDays=21
)
"""
# Initialisation
def __init__(
self,
log,
ra="",
dec="",
radiusArcsec="",
name="",
discInLastDays="",
settings=False,
comments=False
):
self.log = log
log.debug("instansiating a new 'search' object")
self.settings = settings
self.ra = ra
self.dec = dec
self.radiusArcsec = radiusArcsec
self.comments = comments
self.name = name
self.internal_name = ""
self.discInLastDays = discInLastDays
self.page = 0
self.batchSize = 1000
# CREATE THE TIME-RANGE WINDOW TO SEARCH TNS
if not discInLastDays:
self.start = ""
self.end = ""
else:
discInLastDays = int(discInLastDays)
td = timedelta(days=1)
end = datetime.now() + td
self.end = end.strftime("%Y-%m-%d")
td = timedelta(days=discInLastDays)
start = datetime.now() - td
self.start = start.strftime("%Y-%m-%d")
# DETERMINE IF WE HAVE A TNS OR INTERAL SURVEY NAME
if self.name:
matchObject = re.match(r'^((SN|AT) ?)?(\d{4}\w{1,6})', self.name)
if matchObject:
self.name = matchObject.group(3)
else:
self.internal_name = self.name
self.name = ""
# DO THE SEARCH OF THE TNS AND COMPILE THE RESULTS INTO SEPARATE RESULT
# SETS
self.sourceResultsList, self.photResultsList, self.specResultsList, self.relatedFilesResultsList = self._query_tns()
self.sourceResults = list_of_dictionaries(
log=log,
listOfDictionaries=self.sourceResultsList
)
self.photResults = list_of_dictionaries(
log=log,
listOfDictionaries=self.photResultsList
)
self.specResults = list_of_dictionaries(
log=log,
listOfDictionaries=self.specResultsList
)
self.relatedFilesResults = list_of_dictionaries(
log=log,
listOfDictionaries=self.relatedFilesResultsList
)
return None
@property
def sources(
self):
"""*The results of the search returned as a python list of dictionaries*
**Usage:**
.. code-block:: python
sources = tns.sources
"""
sourceResultsList = []
sourceResultsList[:] = [dict(l) for l in self.sourceResultsList]
return sourceResultsList
@property
def spectra(
self):
"""*The associated source spectral data*
**Usage:**
.. code-block:: python
sourceSpectra = tns.spectra
"""
specResultsList = []
specResultsList[:] = [dict(l) for l in self.specResultsList]
return specResultsList
@property
def files(
self):
"""*The associated source files*
**Usage:**
.. code-block:: python
sourceFiles = tns.files
"""
relatedFilesResultsList = []
relatedFilesResultsList[:] = [dict(l)
for l in self.relatedFilesResultsList]
return relatedFilesResultsList
@property
def photometry(
self):
"""*The associated source photometry*
**Usage:**
.. code-block:: python
sourcePhotometry = tns.photometry
"""
photResultsList = []
photResultsList[:] = [dict(l) for l in self.photResultsList]
return photResultsList
@property
def url(
self):
"""*The generated URL used for searching of the TNS*
**Usage:**
.. code-block:: python
searchURL = tns.url
"""
return self._searchURL
def json(
self,
dirPath=None):
"""*Render the results in json format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `jsonSources` -- the top-level transient data
- `jsonPhot` -- all photometry associated with the transients
- `jsonSpec` -- all spectral data associated with the transients
- `jsonFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in json format:
.. code-block:: python
jsonSources, jsonPhot, jsonSpec, jsonFiles = tns.json()
print jsonSources
.. code-block:: text
[
{
"TNSId": "2016asf",
"TNSName": "SN2016asf",
"decDeg": 31.1126,
"decSex": "+31:06:45.36",
"discDate": "2016-03-06 08:09:36",
"discMag": "17.1",
"discMagFilter": "V-Johnson",
"discSurvey": "ASAS-SN",
"discoveryName": "ASASSN-16cs",
"hostName": "KUG 0647+311",
"hostRedshift": null,
"objectUrl": "http://wis-tns.weizmann.ac.il/object/2016asf",
"raDeg": 102.65304166666667,
"raSex": "06:50:36.73",
"separationArcsec": "0.66",
"separationEastArcsec": "-0.13",
"separationNorthArcsec": "0.65",
"specType": "SN Ia",
"transRedshift": "0.021"
}
]
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.json("~/tns")
.. image:: https://i.imgur.com/wAHqARI.png
:width: 800px
:alt: json output
"""
if dirPath:
p = self._file_prefix()
jsonSources = self.sourceResults.json(
filepath=dirPath + "/" + p + "sources.json")
jsonPhot = self.photResults.json(
filepath=dirPath + "/" + p + "phot.json")
jsonSpec = self.specResults.json(
filepath=dirPath + "/" + p + "spec.json")
jsonFiles = self.relatedFilesResults.json(
filepath=dirPath + "/" + p + "relatedFiles.json")
else:
jsonSources = self.sourceResults.json()
jsonPhot = self.photResults.json()
jsonSpec = self.specResults.json()
jsonFiles = self.relatedFilesResults.json()
return jsonSources, jsonPhot, jsonSpec, jsonFiles
def yaml(
self,
dirPath=None):
"""*Render the results in yaml format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `yamlSources` -- the top-level transient data
- `yamlPhot` -- all photometry associated with the transients
- `yamlSpec` -- all spectral data associated with the transients
- `yamlFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in yaml format:
.. code-block:: python
yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml()
print yamlSources
.. code-block:: text
- TNSId: 2016asf
TNSName: SN2016asf
decDeg: 31.1126
decSex: '+31:06:45.36'
discDate: '2016-03-06 08:09:36'
discMag: '17.1'
discMagFilter: V-Johnson
discSurvey: ASAS-SN
discoveryName: ASASSN-16cs
hostName: KUG 0647+311
hostRedshift: null
objectUrl: http://wis-tns.weizmann.ac.il/object/2016asf
raDeg: 102.65304166666667
raSex: '06:50:36.73'
separationArcsec: '0.66'
separationEastArcsec: '-0.13'
separationNorthArcsec: '0.65'
specType: SN Ia
transRedshift: '0.021'
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.yaml("~/tns")
.. image:: https://i.imgur.com/ZpJIC6p.png
:width: 800px
:alt: yaml output
"""
if dirPath:
p = self._file_prefix()
yamlSources = self.sourceResults.yaml(
filepath=dirPath + "/" + p + "sources.yaml")
yamlPhot = self.photResults.yaml(
filepath=dirPath + "/" + p + "phot.yaml")
yamlSpec = self.specResults.yaml(
filepath=dirPath + "/" + p + "spec.yaml")
yamlFiles = self.relatedFilesResults.yaml(
filepath=dirPath + "/" + p + "relatedFiles.yaml")
else:
yamlSources = self.sourceResults.yaml()
yamlPhot = self.photResults.yaml()
yamlSpec = self.specResults.yaml()
yamlFiles = self.relatedFilesResults.yaml()
return yamlSources, yamlPhot, yamlSpec, yamlFiles
def markdown(
self,
dirPath=None):
"""*Render the results in markdown format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `markdownSources` -- the top-level transient data
- `markdownPhot` -- all photometry associated with the transients
- `markdownSpec` -- all spectral data associated with the transients
- `markdownFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in markdown table format:
.. code-block:: python
markdownSources, markdownPhot, markdownSpec, markdownFiles = tns.markdown()
print markdownSources
.. code-block:: text
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
|:---------|:-----------|:---------------|:------------|:-------------|:--------------|:----------|:---------|:---------------|:----------|:---------|:---------------|:---------------------|:----------------------------------------------|:--------------|:--------------|:------------------|:-----------------------|:----------------------|
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.markdown("~/tns")
.. image:: https://i.imgur.com/AYLBQoJ.png
:width: 800px
:alt: markdown output
"""
if dirPath:
p = self._file_prefix()
markdownSources = self.sourceResults.markdown(
filepath=dirPath + "/" + p + "sources.md")
markdownPhot = self.photResults.markdown(
filepath=dirPath + "/" + p + "phot.md")
markdownSpec = self.specResults.markdown(
filepath=dirPath + "/" + p + "spec.md")
markdownFiles = self.relatedFilesResults.markdown(
filepath=dirPath + "/" + p + "relatedFiles.md")
else:
markdownSources = self.sourceResults.markdown()
markdownPhot = self.photResults.markdown()
markdownSpec = self.specResults.markdown()
markdownFiles = self.relatedFilesResults.markdown()
return markdownSources, markdownPhot, markdownSpec, markdownFiles
def table(
self,
dirPath=None):
"""*Render the results as an ascii table*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `tableSources` -- the top-level transient data
- `tablePhot` -- all photometry associated with the transients
- `tableSpec` -- all spectral data associated with the transients
- `tableFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in ascii table format:
.. code-block:: python
tableSources, tablePhot, tableSpec, tableFiles = tns.table()
print tableSources
.. code-block:: text
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.table("~/tns")
.. image:: https://i.imgur.com/m09M0ho.png
:width: 800px
:alt: ascii files
"""
if dirPath:
p = self._file_prefix()
tableSources = self.sourceResults.table(
filepath=dirPath + "/" + p + "sources.ascii")
tablePhot = self.photResults.table(
filepath=dirPath + "/" + p + "phot.ascii")
tableSpec = self.specResults.table(
filepath=dirPath + "/" + p + "spec.ascii")
tableFiles = self.relatedFilesResults.table(
filepath=dirPath + "/" + p + "relatedFiles.ascii")
else:
tableSources = self.sourceResults.table()
tablePhot = self.photResults.table()
tableSpec = self.specResults.table()
tableFiles = self.relatedFilesResults.table()
return tableSources, tablePhot, tableSpec, tableFiles
    def mysql(
            self,
            tableNamePrefix="TNS",
            dirPath=None):
        """*Render the results as MySQL Insert statements*

        **Key Arguments:**
            - ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
            - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

        **Return:**
            - `mysqlSources` -- the top-level transient data
            - `mysqlPhot` -- all photometry associated with the transients
            - `mysqlSpec` -- all spectral data associated with the transients
            - `mysqlFiles` -- all files associated with the matched transients found on the tns

        **Usage:**

            To render the results in mysql insert format:

            .. code-block:: python

                mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")
                print mysqlSources

            .. code-block:: text

                INSERT INTO `TNS_sources` (TNSId,TNSName,dateCreated,decDeg,decSex,discDate,discMag,discMagFilter,discSurvey,discoveryName,hostName,hostRedshift,objectUrl,raDeg,raSex,separationArcsec,separationEastArcsec,separationNorthArcsec,specType,transRedshift) VALUES ("2016asf" ,"SN2016asf" ,"2016-09-20T11:22:13" ,"31.1126" ,"+31:06:45.36" ,"2016-03-06 08:09:36" ,"17.1" ,"V-Johnson" ,"ASAS-SN" ,"ASASSN-16cs" ,"KUG 0647+311" ,null ,"http://wis-tns.weizmann.ac.il/object/2016asf" ,"102.653041667" ,"06:50:36.73" ,"0.66" ,"-0.13" ,"0.65" ,"SN Ia" ,"0.021") ON DUPLICATE KEY UPDATE TNSId="2016asf", TNSName="SN2016asf", dateCreated="2016-09-20T11:22:13", decDeg="31.1126", decSex="+31:06:45.36", discDate="2016-03-06 08:09:36", discMag="17.1", discMagFilter="V-Johnson", discSurvey="ASAS-SN", discoveryName="ASASSN-16cs", hostName="KUG 0647+311", hostRedshift=null, objectUrl="http://wis-tns.weizmann.ac.il/object/2016asf", raDeg="102.653041667", raSex="06:50:36.73", separationArcsec="0.66", separationEastArcsec="-0.13", separationNorthArcsec="0.65", specType="SN Ia", transRedshift="0.021", updated=1, dateLastModified=NOW() ;

            You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be associated with its transient source using the transient's unique `TNSId`.

            .. code-block:: python

                tns.mysql("TNS", "~/tns")

            .. image:: https://i.imgur.com/CozySPW.png
                :width: 800px
                :alt: mysql output
        """
        if dirPath:
            # WRITING TO FILE -- EACH RESULT SET ALSO GETS A CREATE-TABLE
            # STATEMENT SO THE SQL FILE CAN BE IMPORTED INTO AN EMPTY DATABASE
            p = self._file_prefix()

            # TOP-LEVEL SOURCES TABLE (ONE ROW PER TRANSIENT, KEYED ON TNSId)
            createStatement = """
CREATE TABLE `%(tableNamePrefix)s_sources` (
    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
    `TNSId` varchar(20) NOT NULL,
    `TNSName` varchar(20) DEFAULT NULL,
    `dateCreated` datetime DEFAULT NULL,
    `decDeg` double DEFAULT NULL,
    `decSex` varchar(45) DEFAULT NULL,
    `discDate` datetime DEFAULT NULL,
    `discMag` double DEFAULT NULL,
    `discMagFilter` varchar(45) DEFAULT NULL,
    `discSurvey` varchar(100) DEFAULT NULL,
    `discoveryName` varchar(100) DEFAULT NULL,
    `objectUrl` varchar(200) DEFAULT NULL,
    `raDeg` double DEFAULT NULL,
    `raSex` varchar(45) DEFAULT NULL,
    `specType` varchar(100) DEFAULT NULL,
    `transRedshift` double DEFAULT NULL,
    `updated` tinyint(4) DEFAULT '0',
    `dateLastModified` datetime DEFAULT NULL,
    `hostName` VARCHAR(100) NULL DEFAULT NULL,
    `hostRedshift` DOUBLE NULL DEFAULT NULL,
    `survey` VARCHAR(100) NULL DEFAULT NULL,
    PRIMARY KEY (`primaryId`),
    UNIQUE KEY `tnsid` (`TNSId`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
            mysqlSources = self.sourceResults.mysql(
                tableNamePrefix + "_sources", filepath=dirPath + "/" + p + "sources.sql", createStatement=createStatement)

            # PHOTOMETRY TABLE (MANY ROWS PER TRANSIENT, LINKED VIA TNSId)
            createStatement = """
CREATE TABLE `%(tableNamePrefix)s_photometry` (
    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
    `TNSId` varchar(20) NOT NULL,
    `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
    `exptime` double DEFAULT NULL,
    `filter` varchar(100) DEFAULT NULL,
    `limitingMag` tinyint(4) DEFAULT NULL,
    `mag` double DEFAULT NULL,
    `magErr` double DEFAULT NULL,
    `magUnit` varchar(100) DEFAULT NULL,
    `objectName` varchar(100) DEFAULT NULL,
    `obsdate` datetime DEFAULT NULL,
    `reportAddedDate` datetime DEFAULT NULL,
    `suggestedType` varchar(100) DEFAULT NULL,
    `survey` varchar(100) DEFAULT NULL,
    `telescope` varchar(100) DEFAULT NULL,
    `updated` tinyint(4) DEFAULT '0',
    `dateLastModified` datetime DEFAULT NULL,
    `remarks` VARCHAR(800) NULL DEFAULT NULL,
    `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
    PRIMARY KEY (`primaryId`),
    UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
    UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),
    UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
            mysqlPhot = self.photResults.mysql(
                tableNamePrefix + "_photometry", filepath=dirPath + "/" + p + "phot.sql", createStatement=createStatement)

            # SPECTRA TABLE (MANY ROWS PER TRANSIENT, LINKED VIA TNSId)
            createStatement = """
CREATE TABLE `%(tableNamePrefix)s_spectra` (
    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
    `TNSId` varchar(45) NOT NULL,
    `TNSuser` varchar(45) DEFAULT NULL,
    `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
    `exptime` double DEFAULT NULL,
    `obsdate` datetime DEFAULT NULL,
    `reportAddedDate` datetime DEFAULT NULL,
    `specType` varchar(100) DEFAULT NULL,
    `survey` varchar(100) DEFAULT NULL,
    `telescope` varchar(100) DEFAULT NULL,
    `transRedshift` double DEFAULT NULL,
    `updated` tinyint(4) DEFAULT '0',
    `dateLastModified` datetime DEFAULT NULL,
    `remarks` VARCHAR(800) NULL DEFAULT NULL,
    `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
    PRIMARY KEY (`primaryId`),
    UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
    UNIQUE KEY `u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
            mysqlSpec = self.specResults.mysql(
                tableNamePrefix + "_spectra", filepath=dirPath + "/" + p + "spec.sql", createStatement=createStatement)

            # ASSOCIATED FILES TABLE (SPECTRUM AND PHOTOMETRY FILES, FLAGGED
            # VIA THE spec1phot2 COLUMN)
            createStatement = """
CREATE TABLE `%(tableNamePrefix)s_files` (
    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
    `TNSId` varchar(100) NOT NULL,
    `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
    `dateObs` datetime DEFAULT NULL,
    `filename` varchar(200) DEFAULT NULL,
    `spec1phot2` tinyint(4) DEFAULT NULL,
    `url` varchar(800) DEFAULT NULL,
    `updated` tinyint(4) DEFAULT '0',
    `dateLastModified` datetime DEFAULT NULL,
    `comment` VARCHAR(800) NULL DEFAULT NULL,
    PRIMARY KEY (`primaryId`),
    UNIQUE KEY `tnsid_url` (`TNSId`,`url`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
            mysqlFiles = self.relatedFilesResults.mysql(
                tableNamePrefix + "_files", filepath=dirPath + "/" + p + "relatedFiles.sql", createStatement=createStatement)
        else:
            # NO dirPath -- RENDER THE INSERT STATEMENTS IN MEMORY ONLY
            mysqlSources = self.sourceResults.mysql(
                tableNamePrefix + "_sources")
            mysqlPhot = self.photResults.mysql(tableNamePrefix + "_photometry")
            mysqlSpec = self.specResults.mysql(tableNamePrefix + "_spectra")
            mysqlFiles = self.relatedFilesResults.mysql(
                tableNamePrefix + "_files")

        return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles
    def _query_tns(self):
        """
        *determine how to query the TNS, send query and parse the results*

        Pages through the TNS search results (``self.batchSize`` rows per
        page), parsing each returned row into four parallel result tables.

        **Return:**
            - ``sourceTable`` -- one dictionary per matched transient
            - ``photoTable`` -- photometry rows for all matched transients
            - ``specTable`` -- spectral rows for all matched transients
            - ``relatedFilesTable`` -- associated-file rows for all matched transients
            (or ``None`` if the TNS returns a non-200 HTTP status)
        """
        self.log.info('starting the ``get`` method')

        sourceTable = []
        photoTable = []
        specTable = []
        relatedFilesTable = []

        # THIS stop IS TO KEEP TRACK OF THE TNS PAGINATION IF MANY RESULT PAGES
        # ARE RETURNED
        stop = False
        sourceCount = 0
        while not stop:
            status_code, content, self._searchURL = self._get_tns_search_results()
            if status_code != 200:
                # NOTE: the typos in this log message are long-standing; the
                # string is left untouched here as it is runtime output
                self.log.error(
                    'cound not get the search reuslts from the TNS, HTML error code %(status_code)s ' % locals())
                return None
            if "No results found" in content:
                print "No results found"
                return sourceTable, photoTable, specTable, relatedFilesTable
            # A FULL PAGE OF RESULTS IMPLIES MORE PAGES MAY FOLLOW; A SHORT
            # PAGE MEANS THIS IS THE LAST ONE
            if self._parse_transient_rows(content, True) < self.batchSize:
                stop = True
            else:
                self.page += 1
                thisPage = self.page
                print "Downloaded %(thisPage)s page(s) from the TNS. %(sourceCount)s transients parsed so far." % locals()
                sourceCount += self.batchSize
                print "\t" + self._searchURL
                # BE POLITE TO THE TNS SERVER BETWEEN PAGE REQUESTS
                timesleep.sleep(1)

            # PARSE ALL ROWS RETURNED
            for transientRow in self._parse_transient_rows(content):
                # TOP LEVEL DISCOVERY CONTENT
                sourceContent = transientRow.group()
                discInfo, TNSId = self._parse_discovery_information(
                    sourceContent)
                sourceTable.append(discInfo)

                # PHOTOMETERY
                phot, relatedFiles = self._parse_photometry_data(
                    sourceContent, TNSId)
                photoTable += phot
                relatedFilesTable += relatedFiles

                # SPECTRA
                spec, relatedFiles = self._parse_spectral_data(
                    sourceContent, TNSId)
                specTable += spec
                relatedFilesTable += relatedFiles

        # SORT BY SEPARATION FROM THE SEARCH COORDINATES
        # (separationArcsec is only present for conesearches -- any KeyError
        # is swallowed and the table is left unsorted)
        try:
            sourceTable = sorted(sourceTable, key=itemgetter(
                'separationArcsec'), reverse=False)
        except:
            pass

        self.log.info('completed the ``get`` method')
        return sourceTable, photoTable, specTable, relatedFilesTable
def _get_tns_search_results(
self):
"""
*query the tns and result the response*
"""
self.log.info('starting the ``_get_tns_search_results`` method')
try:
response = requests.get(
url="http://wis-tns.weizmann.ac.il/search",
params={
"page": self.page,
"ra": self.ra,
"decl": self.dec,
"radius": self.radiusArcsec,
"name": self.name,
"internal_name": self.internal_name,
"date_start[date]": self.start,
"date_end[date]": self.end,
"num_page": self.batchSize,
"display[redshift]": "1",
"display[hostname]": "1",
"display[host_redshift]": "1",
"display[source_group_name]": "1",
"display[internal_name]": "1",
"display[spectra_count]": "1",
"display[discoverymag]": "1",
"display[discmagfilter]": "1",
"display[discoverydate]": "1",
"display[discoverer]": "1",
"display[sources]": "1",
"display[bibcode]": "1",
},
)
except requests.exceptions.RequestException:
print('HTTP Request failed')
self.log.info('completed the ``_get_tns_search_results`` method')
return response.status_code, response.content, response.url
def _file_prefix(
self):
"""*Generate a file prefix based on the type of search for saving files to disk*
**Return:**
- ``prefix`` -- the file prefix
"""
self.log.info('starting the ``_file_prefix`` method')
if self.ra:
now = datetime.now()
prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_conesearch_")
elif self.name:
prefix = self.name + "_tns_conesearch_"
elif self.internal_name:
prefix = self.internal_name + "_tns_conesearch_"
elif self.discInLastDays:
discInLastDays = str(self.discInLastDays)
now = datetime.now()
prefix = now.strftime(
discInLastDays + "d_since_%Y%m%d_tns_conesearch_")
self.log.info('completed the ``_file_prefix`` method')
return prefix
def _parse_transient_rows(
self,
content,
count=False):
"""* parse transient rows from the TNS result page content*
**Key Arguments:**
- ``content`` -- the content from the TNS results page.
- ``count`` -- return only the number of rows
**Return:**
- ``transientRows``
"""
self.log.info('starting the ``_parse_transient_rows`` method')
regexForRow = r"""\n([^\n]*?<a href="/object/.*?)(?=\n[^\n]*?<a href="/object/|<\!\-\- /\.section, /#content \-\->)"""
if count:
# A SINGLE SOURCE BLOCK
matchedSources = re.findall(
regexForRow,
content,
flags=re.S # re.S
)
return len(matchedSources)
# A SINGLE SOURCE BLOCK
matchedSources = re.finditer(
regexForRow,
content,
flags=re.S # re.S
)
self.log.info('completed the ``_parse_transient_rows`` method')
return matchedSources
def _parse_discovery_information(
self,
content):
"""* parse discovery information from one row on the TNS results page*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page.
**Return:**
- ``discoveryData`` -- dictionary of results
- ``TNSId`` -- the unique TNS id for the transient
"""
self.log.info('starting the ``_parse_discovery_information`` method')
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
matches = re.finditer(
r"""<tr class="row-.*?"><td class="cell-id">(?P<tnsId>\d*?)</td><td class="cell-name"><a href="(?P<objectUrl>.*?)">(?P<TNSName>.*?)</a></td><td class="cell-.*?<td class="cell-ra">(?P<raSex>.*?)</td><td class="cell-decl">(?P<decSex>.*?)</td><td class="cell-ot_name">(?P<specType>.*?)</td><td class="cell-redshift">(?P<transRedshift>.*?)</td><td class="cell-hostname">(?P<hostName>.*?)</td><td class="cell-host_redshift">(?P<hostRedshift>.*?)</td><td class="cell-source_group_name">(?P<discSurvey>.*?)</td>.*?<td class="cell-internal_name">(<a.*?>)?(?P<discoveryName>.*?)(</a>)?</td>.*?<td class="cell-discoverymag">(?P<discMag>.*?)</td><td class="cell-disc_filter_name">(?P<discMagFilter>.*?)</td><td class="cell-discoverydate">(?P<discDate>.*?)</td><td class="cell-discoverer">(?P<sender>.*?)</td>.*?</tr>""",
content,
flags=0 # re.S
)
discoveryData = []
for match in matches:
row = match.groupdict()
for k, v in row.iteritems():
row[k] = v.strip()
if len(v) == 0:
row[k] = None
if row["transRedshift"] == 0:
row["transRedshift"] = None
if row["TNSName"][0] in ["1", "2"]:
row["TNSName"] = "SN" + row["TNSName"]
row["objectUrl"] = "http://wis-tns.weizmann.ac.il" + \
row["objectUrl"]
# CONVERT COORDINATES TO DECIMAL DEGREES
row["raDeg"] = converter.ra_sexegesimal_to_decimal(
ra=row["raSex"]
)
row["decDeg"] = converter.dec_sexegesimal_to_decimal(
dec=row["decSex"]
)
# IF THIS IS A COORDINATE SEARCH, ADD SEPARATION FROM
# ORIGINAL QUERY COORDINATES
if self.ra:
# CALCULATE SEPARATION IN ARCSEC
from astrocalc.coords import separations
calculator = separations(
log=self.log,
ra1=self.ra,
dec1=self.dec,
ra2=row["raDeg"],
dec2=row["decDeg"],
)
angularSeparation, north, east = calculator.get()
row["separationArcsec"] = angularSeparation
row["separationNorthArcsec"] = north
row["separationEastArcsec"] = east
if not row["discSurvey"]:
row["survey"] = row["sender"]
del row["sender"]
del row["tnsId"]
row["TNSName"] = row["TNSName"].replace(" ", "")
row["TNSId"] = row["TNSName"].replace(
"SN", "").replace("AT", "")
TNSId = row["TNSId"]
# ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
orow = collections.OrderedDict()
keyOrder = ["TNSId", "TNSName", "discoveryName", "discSurvey", "raSex", "decSex", "raDeg", "decDeg",
"transRedshift", "specType", "discMag", "discMagFilter", "discDate", "objectUrl", "hostName", "hostRedshift", "separationArcsec", "separationNorthArcsec", "separationEastArcsec"]
for k, v in row.iteritems():
if k not in keyOrder:
keyOrder.append(k)
for k in keyOrder:
try:
orow[k] = row[k]
except:
self.log.info(
"`%(k)s` not found in the source data for %(TNSId)s" % locals())
pass
discoveryData.append(row)
self.log.info('completed the ``_parse_discovery_information`` method')
return discoveryData[0], TNSId
    def _parse_photometry_data(
            self,
            content,
            TNSId):
        """*parse photometry data from a row in the tns results content*

        **Key Arguments:**
            - ``content`` -- a table row from the TNS results page
            - ``TNSId`` -- the tns id of the transient

        **Return:**
            - ``photData`` -- a list of (ordered) dictionaries of the photometry data
            - ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files
        """
        self.log.info('starting the ``_parse_photometry_data`` method')

        photData = []
        relatedFilesTable = []

        # AT REPORT BLOCK -- EVERYTHING UP TO THE CLASSIFICATION REPORTINGS
        # SECTION (OR THE END OF THE CONTENT)
        ATBlock = re.search(
            r"""<tr class=[^\n]*?AT reportings.*?(?=<tr class=[^\n]*?Classification reportings|$)""",
            content,
            flags=re.S  # re.S
        )

        if ATBlock:
            ATBlock = ATBlock.group()
            # ONE MATCH PER DISCOVERY/PHOTOMETRY REPORT
            reports = re.finditer(
                r"""<tr class="row-[^"]*"><td class="cell-id">.*?</table>""",
                ATBlock,
                flags=re.S  # re.S
            )
            relatedFiles = self._parse_related_files(ATBlock)

            for r in reports:
                # REPORT-LEVEL METADATA SHARED BY ALL PHOTOMETRY ROWS BELOW
                header = re.search(
                    r"""<tr class="row[^"]*".*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<sender>[^<]*).*?reporter_name">(?P<reporters>[^<]*).*?source_group_name">(?P<surveyGroup>[^<]*).*?ra">(?P<ra>[^<]*).*?decl">(?P<dec>[^<]*).*?discovery_date">(?P<obsDate>[^<]*).*?flux">(?P<mag>[^<]*).*?filter_name">(?P<magFilter>[^<]*).*?related_files">(?P<relatedFiles>[^<]*).*?type_name">(?P<suggestedType>[^<]*).*?hostname">(?P<hostName>[^<]*).*?host_redshift">(?P<hostRedshift>[^<]*).*?internal_name">(?P<objectName>[^<]*).*?groups">(?P<survey>[^<]*).*?remarks">(?P<sourceComment>[^<]*)""",
                    r.group(),
                    flags=0  # re.S
                )
                # NOTE(review): if the regex fails to match, the report HTML
                # is printed for debugging but `header` is left as None and
                # the next line will raise -- confirm this is intended
                try:
                    header = header.groupdict()
                except:
                    print r.group()
                header["TNSId"] = TNSId
                # DROP THE FIELDS DUPLICATED AT SOURCE LEVEL OR NOT REQUIRED
                del header["reporters"]
                del header["surveyGroup"]
                del header["hostName"]
                del header["hostRedshift"]
                del header["mag"]
                del header["magFilter"]
                del header["obsDate"]
                del header["ra"]
                del header["dec"]
                if not self.comments:
                    del header['sourceComment']
                else:
                    # FLATTEN MULTI-LINE COMMENTS AND MAKE THEM SQL-SAFE
                    theseComments = header[
                        "sourceComment"].split("\n")
                    header["sourceComment"] = ""
                    for c in theseComments:
                        header["sourceComment"] += " " + c.strip()
                    header["sourceComment"] = header[
                        "sourceComment"].strip().replace('"', "'")[0:750]

                # ONE MATCH PER PHOTOMETRY MEASUREMENT IN THE REPORT
                phot = re.finditer(
                    r"""<tr class="row\-[^"]*".*?obsdate">(?P<obsdate>[^<]*).*?flux">(?P<mag>[^<]*).*?fluxerr">(?P<magErr>[^<]*).*?limflux">(?P<limitingMag>[^<]*).*?unit_name">(?P<magUnit>[^<]*).*?filter_name">(?P<filter>[^<]*).*?tel_inst">(?P<telescope>[^<]*).*?exptime">(?P<exptime>[^<]*).*?observer">(?P<observer>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                    r.group(),
                    flags=0  # re.S
                )
                filesAppended = False
                for p in phot:
                    p = p.groupdict()
                    del p["observer"]
                    # A LIMITING FLUX WITHOUT A MAGNITUDE IS A NON-DETECTION
                    if p["limitingMag"] and not p["mag"]:
                        p["mag"] = p["limitingMag"]
                        p["limitingMag"] = 1
                        p["remarks"] = p["remarks"].replace(
                            "[Last non detection]", "")
                    else:
                        p["limitingMag"] = 0
                    if not self.comments:
                        del p["remarks"]
                    p.update(header)
                    # APPEND THE REPORT'S RELATED FILES ONCE ONLY
                    if p["relatedFiles"] and filesAppended == False:
                        filesAppended = True
                        for f in relatedFiles:
                            # ORDER THE DICTIONARY FOR THIS ROW OF
                            # RESULTS
                            thisFile = collections.OrderedDict()
                            thisFile["TNSId"] = TNSId
                            thisFile["filename"] = f[
                                "filepath"].split("/")[-1]
                            thisFile["url"] = f["filepath"]
                            if self.comments:
                                thisFile["comment"] = f[
                                    "fileComment"].replace("\n", " ").strip().replace('"', "'")[0:750]
                            thisFile["dateObs"] = p["obsdate"]
                            thisFile["spec1phot2"] = 2
                            relatedFilesTable.append(thisFile)

                    # FALL BACK TO THE SENDER IF NO SURVEY NAME IS GIVEN
                    if not p["survey"] and not p["objectName"]:
                        p["survey"] = p["sender"]
                    del p["relatedFiles"]
                    del p["sender"]

                    # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                    orow = collections.OrderedDict()
                    keyOrder = ["TNSId", "survey", "obsdate", "filter", "limitingMag", "mag", "magErr",
                                "magUnit", "suggestedType", "telescope", "exptime", "reportAddedDate"]
                    for k, v in p.iteritems():
                        if k not in keyOrder:
                            keyOrder.append(k)
                    for k in keyOrder:
                        try:
                            orow[k] = p[k]
                        except:
                            self.log.info(
                                "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                            pass
                    photData.append(orow)

        self.log.info('completed the ``_parse_photometry_data`` method')
        return photData, relatedFilesTable
def _parse_related_files(
self,
content):
"""*parse the contents for related files URLs and comments*
**Key Arguments:**
- ``content`` -- the content to parse.
**Return:**
- ``relatedFiles`` -- a list of dictionaries of transient related files
"""
self.log.info('starting the ``_parse_related_files`` method')
relatedFilesList = re.finditer(
r"""<td class="cell-filename">.*?href="(?P<filepath>[^"]*).*?remarks">(?P<fileComment>[^<]*)""",
content,
flags=0 # re.S
)
relatedFiles = []
for f in relatedFilesList:
f = f.groupdict()
relatedFiles.append(f)
self.log.info('completed the ``_parse_related_files`` method')
return relatedFiles
    def _parse_spectral_data(
            self,
            content,
            TNSId):
        """*parse spectra data from a row in the tns results content*

        **Key Arguments:**
            - ``content`` -- a table row from the TNS results page
            - ``TNSId`` -- the tns id of the transient

        **Return:**
            - ``specData`` -- a list of (ordered) dictionaries of the spectral data
            - ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files
        """
        self.log.info('starting the ``_parse_spectral_data`` method')

        specData = []
        relatedFilesTable = []

        # CLASSIFICATION BLOCK -- EVERYTHING FROM THE CLASSIFICATION
        # REPORTINGS SECTION TO THE END OF THE CONTENT
        classBlock = re.search(
            r"""<tr class=[^\n]*?Classification reportings.*$""",
            content,
            flags=re.S  # re.S
        )

        if classBlock:
            classBlock = classBlock.group()
            # ONE MATCH PER CLASSIFICATION REPORT
            reports = re.finditer(
                r"""<tr class="row-[^"]*"><td class="cell-id">.*?</tbody>\s*</table>\s*</div></td> </tr>\s*</tbody>\s*</table>\s*</div></td> </tr>""",
                classBlock,
                flags=re.S  #
            )
            relatedFiles = self._parse_related_files(classBlock)

            for r in reports:
                # REPORT-LEVEL METADATA SHARED BY EVERY SPECTRUM IN THE REPORT
                header = re.search(
                    r"""<tr class="row.*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<TNSuser>[^<]*).*?classifier_name">(?P<reporters>[^<]*).*?source_group_name">(?P<survey>[^<]*).*?-type">(?P<specType>[^<]*).*?-redshift">(?P<transRedshift>[^<]*).*?-related_files">(?P<relatedFiles>[^<]*).*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<sourceComment>[^<]*)</td>""",
                    r.group(),
                    flags=re.S  # re.S
                )
                # SKIP REPORTS WHOSE HEADER CANNOT BE PARSED
                if not header:
                    continue
                header = header.groupdict()
                header["TNSId"] = TNSId
                # DROP FIELDS NOT REQUIRED AT THE SPECTRUM LEVEL
                del header["reporters"]
                del header["surveyGroup"]
                del header["survey"]
                if not self.comments:
                    del header['sourceComment']
                else:
                    # FLATTEN MULTI-LINE COMMENTS AND MAKE THEM SQL-SAFE
                    theseComments = header[
                        "sourceComment"].split("\n")
                    header["sourceComment"] = ""
                    for c in theseComments:
                        header["sourceComment"] += " " + c.strip()
                    header["sourceComment"] = header[
                        "sourceComment"].strip().replace('"', "'")[0:750]

                # ONE MATCH PER INDIVIDUAL SPECTRUM IN THE REPORT
                spec = re.finditer(
                    r"""<tr class="class-results-.*?-obsdate">(?P<obsdate>[^<]*).*?-tel_inst">(?P<telescope>[^<]*).*?-exptime">(?P<exptime>[^<]*).*?-observer">(?P<sender>[^<]*).*?-reducer">(?P<reducer>[^<]*).*?-source_group_name">(?P<survey>[^<]*).*?-asciifile">(.*?<a href="(?P<filepath>[^"]*)".*?</a>)?.*?-fitsfile">(.*?<a href="(?P<fitsFilepath>[^"]*)".*?</a>)?.*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                    r.group(),
                    flags=0  # re.S
                )
                filesAppended = False
                for s in spec:
                    s = s.groupdict()
                    del s["sender"]
                    del s["surveyGroup"]
                    del s["reducer"]
                    if not self.comments:
                        del s["remarks"]
                    else:
                        s["remarks"] = s["remarks"].replace('"', "'")[0:750]
                    s.update(header)
                    # APPEND THE REPORT'S RELATED FILES ONCE ONLY
                    if s["relatedFiles"] and filesAppended == False:
                        filesAppended = True
                        for f in relatedFiles:
                            # ORDER THE DICTIONARY FOR THIS ROW OF
                            # RESULTS
                            thisFile = collections.OrderedDict()
                            thisFile["TNSId"] = TNSId
                            thisFile["filename"] = f[
                                "filepath"].split("/")[-1]
                            thisFile["url"] = f["filepath"]
                            if self.comments:
                                thisFile["comment"] = f[
                                    "fileComment"].replace("\n", " ").strip()
                            thisFile["dateObs"] = s["obsdate"]
                            thisFile["spec1phot2"] = 1
                            relatedFilesTable.append(thisFile)
                    # THE ASCII AND FITS SPECTRUM FILES THEMSELVES
                    for ffile in [s["filepath"], s["fitsFilepath"]]:
                        if ffile:
                            # ORDER THE DICTIONARY FOR THIS ROW OF
                            # RESULTS
                            thisFile = collections.OrderedDict()
                            thisFile["TNSId"] = TNSId
                            thisFile["filename"] = ffile.split(
                                "/")[-1]
                            thisFile["url"] = ffile
                            if self.comments:
                                thisFile["comment"] = ""
                            thisFile["dateObs"] = s["obsdate"]
                            thisFile["spec1phot2"] = 1
                            relatedFilesTable.append(thisFile)
                    del s["filepath"]
                    del s["fitsFilepath"]
                    del s["relatedFiles"]

                    # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                    orow = collections.OrderedDict()
                    keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
                                "telescope", "exptime", "reportAddedDate", "TNSuser"]
                    for k, v in s.iteritems():
                        if k not in keyOrder:
                            keyOrder.append(k)
                    for k in keyOrder:
                        try:
                            orow[k] = s[k]
                        except:
                            self.log.info(
                                "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                            pass
                    specData.append(orow)

        self.log.info('completed the ``_parse_spectral_data`` method')
        return specData, relatedFilesTable
|
thespacedoctor/transientNamer
|
transientNamer/search.py
|
search.json
|
python
|
def json(
        self,
        dirPath=None):
    """*Render the results in json format*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `jsonSources` -- the top-level transient data
        - `jsonPhot` -- all photometry associated with the transients
        - `jsonSpec` -- all spectral data associated with the transients
        - `jsonFiles` -- all files associated with the matched transients found on the tns
    """
    if dirPath:
        # WRITE EACH FLAVOUR OF DATA TO ITS OWN PREFIXED FILE ON DISK
        p = self._file_prefix()
        jsonSources = self.sourceResults.json(
            filepath=dirPath + "/" + p + "sources.json")
        jsonPhot = self.photResults.json(
            filepath=dirPath + "/" + p + "phot.json")
        jsonSpec = self.specResults.json(
            filepath=dirPath + "/" + p + "spec.json")
        jsonFiles = self.relatedFilesResults.json(
            filepath=dirPath + "/" + p + "relatedFiles.json")
    else:
        # RENDER IN MEMORY ONLY
        jsonSources = self.sourceResults.json()
        jsonPhot = self.photResults.json()
        jsonSpec = self.specResults.json()
        jsonFiles = self.relatedFilesResults.json()

    return jsonSources, jsonPhot, jsonSpec, jsonFiles
|
*Render the results in json format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `jsonSources` -- the top-level transient data
- `jsonPhot` -- all photometry associated with the transients
- `jsonSpec` -- all spectral data associated with the transients
- `jsonFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in json format:
.. code-block:: python
jsonSources, jsonPhot, jsonSpec, jsonFiles = tns.json()
print jsonSources
.. code-block:: text
[
{
"TNSId": "2016asf",
"TNSName": "SN2016asf",
"decDeg": 31.1126,
"decSex": "+31:06:45.36",
"discDate": "2016-03-06 08:09:36",
"discMag": "17.1",
"discMagFilter": "V-Johnson",
"discSurvey": "ASAS-SN",
"discoveryName": "ASASSN-16cs",
"hostName": "KUG 0647+311",
"hostRedshift": null,
"objectUrl": "http://wis-tns.weizmann.ac.il/object/2016asf",
"raDeg": 102.65304166666667,
"raSex": "06:50:36.73",
"separationArcsec": "0.66",
"separationEastArcsec": "-0.13",
"separationNorthArcsec": "0.65",
"specType": "SN Ia",
"transRedshift": "0.021"
}
]
You can save the results to file by passing in a directory path within which to save the files. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be associated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.json("~/tns")
.. image:: https://i.imgur.com/wAHqARI.png
:width: 800px
:alt: json output
|
train
|
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L282-L357
|
[
"def _file_prefix(\n self):\n \"\"\"*Generate a file prefix based on the type of search for saving files to disk*\n\n **Return:**\n - ``prefix`` -- the file prefix\n \"\"\"\n self.log.info('starting the ``_file_prefix`` method')\n\n if self.ra:\n now = datetime.now()\n prefix = now.strftime(\"%Y%m%dt%H%M%S%f_tns_conesearch_\")\n elif self.name:\n prefix = self.name + \"_tns_conesearch_\"\n elif self.internal_name:\n prefix = self.internal_name + \"_tns_conesearch_\"\n elif self.discInLastDays:\n discInLastDays = str(self.discInLastDays)\n now = datetime.now()\n prefix = now.strftime(\n discInLastDays + \"d_since_%Y%m%d_tns_conesearch_\")\n\n self.log.info('completed the ``_file_prefix`` method')\n return prefix\n"
] |
class search():
"""
*The worker class for the transient namer search module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``ra`` -- RA of the location being checked
- ``dec`` -- DEC of the location being searched
- ``radiusArcsec`` - the radius of the conesearch to perform against the TNS
- ``name`` -- name of the object to search the TNS for
- ``discInLastDays`` -- search the TNS for transient discovered in the last X days
- ``comments`` -- print the comments from the TNS, note these can be long making table outputs somewhat unreadable. Default *False*
**Usage:**
To initiate a search object to search the TNS via an object name (either TNS or survey names accepted):
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
name="Gaia16bbi"
)
or for a conesearch use something similar to:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
ra="06:50:36.74",
dec="+31:06:44.7",
radiusArcsec=5
)
    Note the search method can accept coordinates in sexagesimal or decimal degree formats.
To list all new objects discovered in the last three weeks, then use:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
discInLastDays=21
)
"""
# Initialisation
    def __init__(
            self,
            log,
            ra="",
            dec="",
            radiusArcsec="",
            name="",
            discInLastDays="",
            settings=False,
            comments=False
    ):
        """Initialise the search, query the TNS and cache the four result sets.

        See the class docstring for a description of the arguments.
        """
        self.log = log
        log.debug("instansiating a new 'search' object")
        self.settings = settings
        self.ra = ra
        self.dec = dec
        self.radiusArcsec = radiusArcsec
        self.comments = comments
        self.name = name
        self.internal_name = ""
        self.discInLastDays = discInLastDays
        # PAGINATION STATE FOR THE TNS QUERY (batchSize ROWS PER PAGE)
        self.page = 0
        self.batchSize = 1000

        # CREATE THE TIME-RANGE WINDOW TO SEARCH TNS
        if not discInLastDays:
            self.start = ""
            self.end = ""
        else:
            # END TOMORROW SO TODAY'S DISCOVERIES ARE INCLUDED
            discInLastDays = int(discInLastDays)
            td = timedelta(days=1)
            end = datetime.now() + td
            self.end = end.strftime("%Y-%m-%d")
            td = timedelta(days=discInLastDays)
            start = datetime.now() - td
            self.start = start.strftime("%Y-%m-%d")

        # DETERMINE IF WE HAVE A TNS OR INTERAL SURVEY NAME
        # (TNS NAMES LOOK LIKE "SN 2016asf"/"AT2016asf"/"2016asf"; ANYTHING
        # ELSE IS TREATED AS A SURVEY-INTERNAL NAME)
        if self.name:
            matchObject = re.match(r'^((SN|AT) ?)?(\d{4}\w{1,6})', self.name)
            if matchObject:
                self.name = matchObject.group(3)
            else:
                self.internal_name = self.name
                self.name = ""

        # DO THE SEARCH OF THE TNS AND COMPILE THE RESULTS INTO SEPARATE RESULT
        # SETS
        self.sourceResultsList, self.photResultsList, self.specResultsList, self.relatedFilesResultsList = self._query_tns()

        # WRAP EACH RESULT SET SO IT CAN BE RENDERED AS csv/json/yaml/mysql/table
        self.sourceResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.sourceResultsList
        )
        self.photResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.photResultsList
        )
        self.specResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.specResultsList
        )
        self.relatedFilesResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.relatedFilesResultsList
        )

        return None
@property
def sources(
self):
"""*The results of the search returned as a python list of dictionaries*
**Usage:**
.. code-block:: python
sources = tns.sources
"""
sourceResultsList = []
sourceResultsList[:] = [dict(l) for l in self.sourceResultsList]
return sourceResultsList
@property
def spectra(
self):
"""*The associated source spectral data*
**Usage:**
.. code-block:: python
sourceSpectra = tns.spectra
"""
specResultsList = []
specResultsList[:] = [dict(l) for l in self.specResultsList]
return specResultsList
@property
def files(
self):
"""*The associated source files*
**Usage:**
.. code-block:: python
sourceFiles = tns.files
"""
relatedFilesResultsList = []
relatedFilesResultsList[:] = [dict(l)
for l in self.relatedFilesResultsList]
return relatedFilesResultsList
@property
def photometry(
self):
"""*The associated source photometry*
**Usage:**
.. code-block:: python
sourcePhotometry = tns.photometry
"""
photResultsList = []
photResultsList[:] = [dict(l) for l in self.photResultsList]
return photResultsList
    @property
    def url(
            self):
        """*The generated URL used for searching of the TNS*

        **Usage:**

            .. code-block:: python

                searchURL = tns.url
        """
        # _searchURL IS THE FULLY-RESOLVED URL CAPTURED DURING _query_tns
        return self._searchURL
def csv(
self,
dirPath=None):
"""*Render the results in csv format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `csvSources` -- the top-level transient data
- `csvPhot` -- all photometry associated with the transients
- `csvSpec` -- all spectral data associated with the transients
- `csvFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in csv format:
.. code-block:: python
csvSources, csvPhot, csvSpec, csvFiles = tns.csv()
print csvSources
.. code-block:: text
TNSId,TNSName,discoveryName,discSurvey,raSex,decSex,raDeg,decDeg,transRedshift,specType,discMag,discMagFilter,discDate,objectUrl,hostName,hostRedshift,separationArcsec,separationNorthArcsec,separationEastArcsec
2016asf,SN2016asf,ASASSN-16cs,ASAS-SN,06:50:36.73,+31:06:45.36,102.6530,31.1126,0.021,SN Ia,17.1,V-Johnson,2016-03-06 08:09:36,http://wis-tns.weizmann.ac.il/object/2016asf,KUG 0647+311,,0.66,0.65,-0.13
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.csv("~/tns")
.. image:: https://i.imgur.com/BwwqMBg.png
:width: 800px
:alt: csv output
"""
if dirPath:
p = self._file_prefix()
csvSources = self.sourceResults.csv(
filepath=dirPath + "/" + p + "sources.csv")
csvPhot = self.photResults.csv(
filepath=dirPath + "/" + p + "phot.csv")
csvSpec = self.specResults.csv(
filepath=dirPath + "/" + p + "spec.csv")
csvFiles = self.relatedFilesResults.csv(
filepath=dirPath + "/" + p + "relatedFiles.csv")
else:
csvSources = self.sourceResults.csv()
csvPhot = self.photResults.csv()
csvSpec = self.specResults.csv()
csvFiles = self.relatedFilesResults.csv()
return csvSources, csvPhot, csvSpec, csvFiles
def yaml(
        self,
        dirPath=None):
    """*Render the results in yaml format*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `yamlSources` -- the top-level transient data
        - `yamlPhot` -- all photometry associated with the transients
        - `yamlSpec` -- all spectral data associated with the transients
        - `yamlFiles` -- all files associated with the matched transients found on the tns

    If ``dirPath`` is given, each of the four result sets is also written to
    its own yaml file (prefixed according to the search type). All data can be
    re-associated with its transient source via the unique `TNSId`.
    """
    # THE FOUR RESULT SETS AND THE FILE SUFFIX EACH ONE IS SAVED UNDER
    resultSets = (
        (self.sourceResults, "sources.yaml"),
        (self.photResults, "phot.yaml"),
        (self.specResults, "spec.yaml"),
        (self.relatedFilesResults, "relatedFiles.yaml")
    )
    if dirPath:
        p = self._file_prefix()
        rendered = [rs.yaml(filepath=dirPath + "/" + p + suffix)
                    for rs, suffix in resultSets]
    else:
        rendered = [rs.yaml() for rs, suffix in resultSets]
    yamlSources, yamlPhot, yamlSpec, yamlFiles = rendered
    return yamlSources, yamlPhot, yamlSpec, yamlFiles
def markdown(
        self,
        dirPath=None):
    """*Render the results in markdown format*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `markdownSources` -- the top-level transient data
        - `markdownPhot` -- all photometry associated with the transients
        - `markdownSpec` -- all spectral data associated with the transients
        - `markdownFiles` -- all files associated with the matched transients found on the tns

    If ``dirPath`` is given, each of the four result sets is also written to
    its own markdown file (prefixed according to the search type). All data
    can be re-associated with its transient source via the unique `TNSId`.
    """
    # THE FOUR RESULT SETS AND THE FILE SUFFIX EACH ONE IS SAVED UNDER
    resultSets = (
        (self.sourceResults, "sources.md"),
        (self.photResults, "phot.md"),
        (self.specResults, "spec.md"),
        (self.relatedFilesResults, "relatedFiles.md")
    )
    if dirPath:
        p = self._file_prefix()
        rendered = [rs.markdown(filepath=dirPath + "/" + p + suffix)
                    for rs, suffix in resultSets]
    else:
        rendered = [rs.markdown() for rs, suffix in resultSets]
    markdownSources, markdownPhot, markdownSpec, markdownFiles = rendered
    return markdownSources, markdownPhot, markdownSpec, markdownFiles
def table(
        self,
        dirPath=None):
    """*Render the results as an ascii table*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `tableSources` -- the top-level transient data
        - `tablePhot` -- all photometry associated with the transients
        - `tableSpec` -- all spectral data associated with the transients
        - `tableFiles` -- all files associated with the matched transients found on the tns

    If ``dirPath`` is given, each of the four result sets is also written to
    its own ascii file (prefixed according to the search type). All data can
    be re-associated with its transient source via the unique `TNSId`.
    """
    # THE FOUR RESULT SETS AND THE FILE SUFFIX EACH ONE IS SAVED UNDER
    resultSets = (
        (self.sourceResults, "sources.ascii"),
        (self.photResults, "phot.ascii"),
        (self.specResults, "spec.ascii"),
        (self.relatedFilesResults, "relatedFiles.ascii")
    )
    if dirPath:
        p = self._file_prefix()
        rendered = [rs.table(filepath=dirPath + "/" + p + suffix)
                    for rs, suffix in resultSets]
    else:
        rendered = [rs.table() for rs, suffix in resultSets]
    tableSources, tablePhot, tableSpec, tableFiles = rendered
    return tableSources, tablePhot, tableSpec, tableFiles
def mysql(
        self,
        tableNamePrefix="TNS",
        dirPath=None):
    """*Render the results as MySQL Insert statements*

    **Key Arguments:**
        - ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `mysqlSources` -- the top-level transient data
        - `mysqlPhot` -- all photometry associated with the transients
        - `mysqlSpec` -- all spectral data associated with the transients
        - `mysqlFiles` -- all files associated with the matched transients found on the tns

    **Usage:**

        To render the results as INSERT ... ON DUPLICATE KEY UPDATE
        statements:

        .. code-block:: python

            mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")

        Pass a directory path in ``dirPath`` to also write each of the four
        result sets to its own ``.sql`` file, in which case a matching
        CREATE TABLE statement is prepended to each file. All data can be
        re-associated with its transient source via the unique `TNSId`.
    """
    if dirPath:
        # WRITING TO FILE - EACH RESULT SET GETS A CREATE TABLE STATEMENT
        # AHEAD OF ITS INSERTS. `%(tableNamePrefix)s` IN EACH SCHEMA IS
        # INTERPOLATED VIA `% locals()`.
        p = self._file_prefix()
        # SCHEMA FOR THE TOP-LEVEL TRANSIENT SOURCES TABLE
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_sources` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`TNSId` varchar(20) NOT NULL,
`TNSName` varchar(20) DEFAULT NULL,
`dateCreated` datetime DEFAULT NULL,
`decDeg` double DEFAULT NULL,
`decSex` varchar(45) DEFAULT NULL,
`discDate` datetime DEFAULT NULL,
`discMag` double DEFAULT NULL,
`discMagFilter` varchar(45) DEFAULT NULL,
`discSurvey` varchar(100) DEFAULT NULL,
`discoveryName` varchar(100) DEFAULT NULL,
`objectUrl` varchar(200) DEFAULT NULL,
`raDeg` double DEFAULT NULL,
`raSex` varchar(45) DEFAULT NULL,
`specType` varchar(100) DEFAULT NULL,
`transRedshift` double DEFAULT NULL,
`updated` tinyint(4) DEFAULT '0',
`dateLastModified` datetime DEFAULT NULL,
`hostName` VARCHAR(100) NULL DEFAULT NULL,
`hostRedshift` DOUBLE NULL DEFAULT NULL,
`survey` VARCHAR(100) NULL DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `tnsid` (`TNSId`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources", filepath=dirPath + "/" + p + "sources.sql", createStatement=createStatement)
        # SCHEMA FOR THE PHOTOMETRY TABLE
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_photometry` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`TNSId` varchar(20) NOT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`exptime` double DEFAULT NULL,
`filter` varchar(100) DEFAULT NULL,
`limitingMag` tinyint(4) DEFAULT NULL,
`mag` double DEFAULT NULL,
`magErr` double DEFAULT NULL,
`magUnit` varchar(100) DEFAULT NULL,
`objectName` varchar(100) DEFAULT NULL,
`obsdate` datetime DEFAULT NULL,
`reportAddedDate` datetime DEFAULT NULL,
`suggestedType` varchar(100) DEFAULT NULL,
`survey` varchar(100) DEFAULT NULL,
`telescope` varchar(100) DEFAULT NULL,
`updated` tinyint(4) DEFAULT '0',
`dateLastModified` datetime DEFAULT NULL,
`remarks` VARCHAR(800) NULL DEFAULT NULL,
`sourceComment` VARCHAR(800) NULL DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),
UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlPhot = self.photResults.mysql(
            tableNamePrefix + "_photometry", filepath=dirPath + "/" + p + "phot.sql", createStatement=createStatement)
        # SCHEMA FOR THE SPECTRA TABLE
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_spectra` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`TNSId` varchar(45) NOT NULL,
`TNSuser` varchar(45) DEFAULT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`exptime` double DEFAULT NULL,
`obsdate` datetime DEFAULT NULL,
`reportAddedDate` datetime DEFAULT NULL,
`specType` varchar(100) DEFAULT NULL,
`survey` varchar(100) DEFAULT NULL,
`telescope` varchar(100) DEFAULT NULL,
`transRedshift` double DEFAULT NULL,
`updated` tinyint(4) DEFAULT '0',
`dateLastModified` datetime DEFAULT NULL,
`remarks` VARCHAR(800) NULL DEFAULT NULL,
`sourceComment` VARCHAR(800) NULL DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
UNIQUE KEY `u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlSpec = self.specResults.mysql(
            tableNamePrefix + "_spectra", filepath=dirPath + "/" + p + "spec.sql", createStatement=createStatement)
        # SCHEMA FOR THE RELATED FILES TABLE
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_files` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`TNSId` varchar(100) NOT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`dateObs` datetime DEFAULT NULL,
`filename` varchar(200) DEFAULT NULL,
`spec1phot2` tinyint(4) DEFAULT NULL,
`url` varchar(800) DEFAULT NULL,
`updated` tinyint(4) DEFAULT '0',
`dateLastModified` datetime DEFAULT NULL,
`comment` VARCHAR(800) NULL DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `tnsid_url` (`TNSId`,`url`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files", filepath=dirPath + "/" + p + "relatedFiles.sql", createStatement=createStatement)
    else:
        # NO DIRECTORY GIVEN - RENDER THE INSERT STATEMENTS ONLY, NO
        # CREATE TABLE STATEMENTS AND NOTHING WRITTEN TO DISK
        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources")
        mysqlPhot = self.photResults.mysql(tableNamePrefix + "_photometry")
        mysqlSpec = self.specResults.mysql(tableNamePrefix + "_spectra")
        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files")
    return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles
def _query_tns(self):
    """
    *determine how to query the TNS, send query and parse the results*

    **Return:**
        - ``sourceTable`` -- one dictionary of discovery metadata per matched transient
        - ``photoTable`` -- photometry rows for all matched transients
        - ``specTable`` -- spectroscopy rows for all matched transients
        - ``relatedFilesTable`` -- related-file rows for all matched transients

    Note: returns ``None`` (not the 4-tuple) if the TNS responds with a
    non-200 status code.
    """
    self.log.info('starting the ``get`` method')

    sourceTable = []
    photoTable = []
    specTable = []
    relatedFilesTable = []

    # THIS stop IS TO KEEP TRACK OF THE TNS PAGINATION IF MANY RESULT PAGES
    # ARE RETURNED
    stop = False
    sourceCount = 0
    while not stop:
        status_code, content, self._searchURL = self._get_tns_search_results()

        if status_code != 200:
            self.log.error(
                'cound not get the search reuslts from the TNS, HTML error code %(status_code)s ' % locals())
            # NOTE(review): this returns None rather than the usual 4-tuple;
            # callers that unpack the result will raise -- confirm intended
            return None

        if "No results found" in content:
            print "No results found"
            return sourceTable, photoTable, specTable, relatedFilesTable

        # A PAGE WITH FEWER ROWS THAN THE BATCH SIZE IS THE FINAL PAGE;
        # OTHERWISE ADVANCE TO THE NEXT PAGE AND THROTTLE WITH A 1s SLEEP
        if self._parse_transient_rows(content, True) < self.batchSize:
            stop = True
        else:
            self.page += 1
            thisPage = self.page
            # NOTE(review): sourceCount is printed before being incremented,
            # so the reported tally lags one page behind
            print "Downloaded %(thisPage)s page(s) from the TNS. %(sourceCount)s transients parsed so far." % locals()
            sourceCount += self.batchSize
            print "\t" + self._searchURL
            timesleep.sleep(1)

        # PARSE ALL ROWS RETURNED (FOR THE CURRENT PAGE)
        for transientRow in self._parse_transient_rows(content):

            # TOP LEVEL DISCOVERY CONTENT
            sourceContent = transientRow.group()
            discInfo, TNSId = self._parse_discovery_information(
                sourceContent)
            sourceTable.append(discInfo)

            # PHOTOMETERY
            phot, relatedFiles = self._parse_photometry_data(
                sourceContent, TNSId)
            photoTable += phot
            relatedFilesTable += relatedFiles

            # SPECTRA
            spec, relatedFiles = self._parse_spectral_data(
                sourceContent, TNSId)
            specTable += spec
            relatedFilesTable += relatedFiles

    # SORT BY SEPARATION FROM THE SEARCH COORDINATES
    # (separationArcsec only exists for conesearches, hence the bare except)
    try:
        sourceTable = sorted(sourceTable, key=itemgetter(
            'separationArcsec'), reverse=False)
    except:
        pass

    self.log.info('completed the ``get`` method')
    return sourceTable, photoTable, specTable, relatedFilesTable
def _get_tns_search_results(
        self):
    """
    *query the tns and return the response*

    **Return:**
        - ``status_code`` -- the HTTP status code of the response
        - ``content`` -- the raw body of the response
        - ``url`` -- the final URL queried (including all GET parameters)

    **Raises:**
        - ``requests.exceptions.RequestException`` -- if the HTTP request
          itself fails (connection error, timeout, etc.)
    """
    self.log.info('starting the ``_get_tns_search_results`` method')

    try:
        response = requests.get(
            url="http://wis-tns.weizmann.ac.il/search",
            params={
                "page": self.page,
                "ra": self.ra,
                "decl": self.dec,
                "radius": self.radiusArcsec,
                "name": self.name,
                "internal_name": self.internal_name,
                "date_start[date]": self.start,
                "date_end[date]": self.end,
                "num_page": self.batchSize,
                "display[redshift]": "1",
                "display[hostname]": "1",
                "display[host_redshift]": "1",
                "display[source_group_name]": "1",
                "display[internal_name]": "1",
                "display[spectra_count]": "1",
                "display[discoverymag]": "1",
                "display[discmagfilter]": "1",
                "display[discoverydate]": "1",
                "display[discoverer]": "1",
                "display[sources]": "1",
                "display[bibcode]": "1",
            },
        )
    except requests.exceptions.RequestException:
        # BUG FIX: previously a warning was printed and execution fell
        # through to `return response...` with `response` unbound, raising
        # a confusing NameError. Re-raise the original exception instead.
        print('HTTP Request failed')
        raise

    self.log.info('completed the ``_get_tns_search_results`` method')
    return response.status_code, response.content, response.url
def _file_prefix(
self):
"""*Generate a file prefix based on the type of search for saving files to disk*
**Return:**
- ``prefix`` -- the file prefix
"""
self.log.info('starting the ``_file_prefix`` method')
if self.ra:
now = datetime.now()
prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_conesearch_")
elif self.name:
prefix = self.name + "_tns_conesearch_"
elif self.internal_name:
prefix = self.internal_name + "_tns_conesearch_"
elif self.discInLastDays:
discInLastDays = str(self.discInLastDays)
now = datetime.now()
prefix = now.strftime(
discInLastDays + "d_since_%Y%m%d_tns_conesearch_")
self.log.info('completed the ``_file_prefix`` method')
return prefix
def _parse_transient_rows(
self,
content,
count=False):
"""* parse transient rows from the TNS result page content*
**Key Arguments:**
- ``content`` -- the content from the TNS results page.
- ``count`` -- return only the number of rows
**Return:**
- ``transientRows``
"""
self.log.info('starting the ``_parse_transient_rows`` method')
regexForRow = r"""\n([^\n]*?<a href="/object/.*?)(?=\n[^\n]*?<a href="/object/|<\!\-\- /\.section, /#content \-\->)"""
if count:
# A SINGLE SOURCE BLOCK
matchedSources = re.findall(
regexForRow,
content,
flags=re.S # re.S
)
return len(matchedSources)
# A SINGLE SOURCE BLOCK
matchedSources = re.finditer(
regexForRow,
content,
flags=re.S # re.S
)
self.log.info('completed the ``_parse_transient_rows`` method')
return matchedSources
def _parse_discovery_information(
        self,
        content):
    """* parse discovery information from one row on the TNS results page*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page.

    **Return:**
        - ``discoveryData`` -- ordered dictionary of discovery results
        - ``TNSId`` -- the unique TNS id for the transient
    """
    self.log.info('starting the ``_parse_discovery_information`` method')

    # ASTROCALC UNIT CONVERTER OBJECT
    converter = unit_conversion(
        log=self.log
    )

    matches = re.finditer(
        r"""<tr class="row-.*?"><td class="cell-id">(?P<tnsId>\d*?)</td><td class="cell-name"><a href="(?P<objectUrl>.*?)">(?P<TNSName>.*?)</a></td><td class="cell-.*?<td class="cell-ra">(?P<raSex>.*?)</td><td class="cell-decl">(?P<decSex>.*?)</td><td class="cell-ot_name">(?P<specType>.*?)</td><td class="cell-redshift">(?P<transRedshift>.*?)</td><td class="cell-hostname">(?P<hostName>.*?)</td><td class="cell-host_redshift">(?P<hostRedshift>.*?)</td><td class="cell-source_group_name">(?P<discSurvey>.*?)</td>.*?<td class="cell-internal_name">(<a.*?>)?(?P<discoveryName>.*?)(</a>)?</td>.*?<td class="cell-discoverymag">(?P<discMag>.*?)</td><td class="cell-disc_filter_name">(?P<discMagFilter>.*?)</td><td class="cell-discoverydate">(?P<discDate>.*?)</td><td class="cell-discoverer">(?P<sender>.*?)</td>.*?</tr>""",
        content,
        flags=0  # re.S
    )

    discoveryData = []
    for match in matches:
        row = match.groupdict()
        # STRIP WHITESPACE AND NULLIFY EMPTY CELLS
        for k, v in row.iteritems():
            row[k] = v.strip()
            if len(v) == 0:
                row[k] = None
        # NOTE(review): values are strings here, so this comparison with the
        # integer 0 never matches -- presumably meant to nullify zero
        # redshifts; confirm before changing
        if row["transRedshift"] == 0:
            row["transRedshift"] = None
        # A LEADING YEAR DIGIT MEANS A CONFIRMED SUPERNOVA NAME
        if row["TNSName"][0] in ["1", "2"]:
            row["TNSName"] = "SN" + row["TNSName"]
        row["objectUrl"] = "http://wis-tns.weizmann.ac.il" + \
            row["objectUrl"]

        # CONVERT COORDINATES TO DECIMAL DEGREES
        row["raDeg"] = converter.ra_sexegesimal_to_decimal(
            ra=row["raSex"]
        )
        row["decDeg"] = converter.dec_sexegesimal_to_decimal(
            dec=row["decSex"]
        )

        # IF THIS IS A COORDINATE SEARCH, ADD SEPARATION FROM
        # ORIGINAL QUERY COORDINATES
        if self.ra:
            # CALCULATE SEPARATION IN ARCSEC
            from astrocalc.coords import separations
            calculator = separations(
                log=self.log,
                ra1=self.ra,
                dec1=self.dec,
                ra2=row["raDeg"],
                dec2=row["decDeg"],
            )
            angularSeparation, north, east = calculator.get()

            row["separationArcsec"] = angularSeparation
            row["separationNorthArcsec"] = north
            row["separationEastArcsec"] = east

        # FALL BACK TO THE SENDER WHEN NO DISCOVERY SURVEY IS GIVEN
        if not row["discSurvey"]:
            row["survey"] = row["sender"]
        del row["sender"]
        del row["tnsId"]

        # DERIVE THE UNIQUE TNS ID FROM THE (NORMALISED) TNS NAME
        row["TNSName"] = row["TNSName"].replace(" ", "")
        row["TNSId"] = row["TNSName"].replace(
            "SN", "").replace("AT", "")
        TNSId = row["TNSId"]

        # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
        orow = collections.OrderedDict()
        keyOrder = ["TNSId", "TNSName", "discoveryName", "discSurvey", "raSex", "decSex", "raDeg", "decDeg",
                    "transRedshift", "specType", "discMag", "discMagFilter", "discDate", "objectUrl", "hostName", "hostRedshift", "separationArcsec", "separationNorthArcsec", "separationEastArcsec"]
        for k, v in row.iteritems():
            if k not in keyOrder:
                keyOrder.append(k)
        for k in keyOrder:
            try:
                orow[k] = row[k]
            except:
                self.log.info(
                    "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                pass
        # BUG FIX: append the ordered dictionary (previously the unordered
        # `row` was appended, silently discarding the key-ordering work
        # above; the sibling photometry/spectra parsers append `orow`)
        discoveryData.append(orow)

    self.log.info('completed the ``_parse_discovery_information`` method')
    return discoveryData[0], TNSId
def _parse_photometry_data(
        self,
        content,
        TNSId):
    """*parse photometry data from a row in the tns results content*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page
        - ``TNSId`` -- the tns id of the transient

    **Return:**
        - ``photData`` -- a list of dictionaries of the photometry data
        - ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files
    """
    self.log.info('starting the ``_parse_photometry_data`` method')

    photData = []
    relatedFilesTable = []

    # AT REPORT BLOCK - EVERYTHING BETWEEN THE "AT reportings" HEADER AND
    # THE "Classification reportings" HEADER (OR END OF CONTENT)
    ATBlock = re.search(
        r"""<tr class=[^\n]*?AT reportings.*?(?=<tr class=[^\n]*?Classification reportings|$)""",
        content,
        flags=re.S  # re.S
    )

    if ATBlock:
        ATBlock = ATBlock.group()
        # ONE MATCH PER AT REPORT WITHIN THE BLOCK
        reports = re.finditer(
            r"""<tr class="row-[^"]*"><td class="cell-id">.*?</table>""",
            ATBlock,
            flags=re.S  # re.S
        )
        relatedFiles = self._parse_related_files(ATBlock)

        for r in reports:
            # REPORT-LEVEL METADATA SHARED BY ALL PHOTOMETRY ROWS IN THIS
            # REPORT
            header = re.search(
                r"""<tr class="row[^"]*".*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<sender>[^<]*).*?reporter_name">(?P<reporters>[^<]*).*?source_group_name">(?P<surveyGroup>[^<]*).*?ra">(?P<ra>[^<]*).*?decl">(?P<dec>[^<]*).*?discovery_date">(?P<obsDate>[^<]*).*?flux">(?P<mag>[^<]*).*?filter_name">(?P<magFilter>[^<]*).*?related_files">(?P<relatedFiles>[^<]*).*?type_name">(?P<suggestedType>[^<]*).*?hostname">(?P<hostName>[^<]*).*?host_redshift">(?P<hostRedshift>[^<]*).*?internal_name">(?P<objectName>[^<]*).*?groups">(?P<survey>[^<]*).*?remarks">(?P<sourceComment>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )
            # NOTE(review): if the header regex fails to match, the except
            # prints the report and execution continues with `header` still
            # the unmatched value, raising below -- confirm intended
            try:
                header = header.groupdict()
            except:
                print r.group()
            header["TNSId"] = TNSId
            # DROP REPORT FIELDS THAT DUPLICATE SOURCE-LEVEL DATA
            del header["reporters"]
            del header["surveyGroup"]
            del header["hostName"]
            del header["hostRedshift"]
            del header["mag"]
            del header["magFilter"]
            del header["obsDate"]
            del header["ra"]
            del header["dec"]

            if not self.comments:
                del header['sourceComment']
            else:
                # FLATTEN MULTI-LINE COMMENTS TO A SINGLE TRUNCATED LINE
                theseComments = header[
                    "sourceComment"].split("\n")
                header["sourceComment"] = ""
                for c in theseComments:
                    header["sourceComment"] += " " + c.strip()
                header["sourceComment"] = header[
                    "sourceComment"].strip().replace('"', "'")[0:750]

            # INDIVIDUAL PHOTOMETRY ROWS WITHIN THIS REPORT
            phot = re.finditer(
                r"""<tr class="row\-[^"]*".*?obsdate">(?P<obsdate>[^<]*).*?flux">(?P<mag>[^<]*).*?fluxerr">(?P<magErr>[^<]*).*?limflux">(?P<limitingMag>[^<]*).*?unit_name">(?P<magUnit>[^<]*).*?filter_name">(?P<filter>[^<]*).*?tel_inst">(?P<telescope>[^<]*).*?exptime">(?P<exptime>[^<]*).*?observer">(?P<observer>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )

            # ONLY ATTACH THE REPORT'S RELATED FILES TO THE FIRST
            # PHOTOMETRY ROW THAT REFERENCES THEM
            filesAppended = False
            for p in phot:
                p = p.groupdict()
                del p["observer"]
                # A LIMIT WITH NO DETECTION: PROMOTE limflux TO mag AND
                # FLAG THE ROW AS A LIMITING MAGNITUDE
                if p["limitingMag"] and not p["mag"]:
                    p["mag"] = p["limitingMag"]
                    p["limitingMag"] = 1
                    p["remarks"] = p["remarks"].replace(
                        "[Last non detection]", "")
                else:
                    p["limitingMag"] = 0

                if not self.comments:
                    del p["remarks"]

                p.update(header)

                if p["relatedFiles"] and filesAppended == False:
                    filesAppended = True
                    for f in relatedFiles:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = f[
                            "filepath"].split("/")[-1]
                        thisFile["url"] = f["filepath"]
                        if self.comments:
                            thisFile["comment"] = f[
                                "fileComment"].replace("\n", " ").strip().replace('"', "'")[0:750]
                        thisFile["dateObs"] = p["obsdate"]
                        # 2 == PHOTOMETRY-RELATED FILE (1 == SPECTRUM)
                        thisFile["spec1phot2"] = 2
                        relatedFilesTable.append(thisFile)

                # FALL BACK TO THE SENDER WHEN NO SURVEY/NAME IS GIVEN
                if not p["survey"] and not p["objectName"]:
                    p["survey"] = p["sender"]
                del p["relatedFiles"]
                del p["sender"]

                # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                orow = collections.OrderedDict()
                keyOrder = ["TNSId", "survey", "obsdate", "filter", "limitingMag", "mag", "magErr",
                            "magUnit", "suggestedType", "telescope", "exptime", "reportAddedDate"]
                for k, v in p.iteritems():
                    if k not in keyOrder:
                        keyOrder.append(k)
                for k in keyOrder:
                    try:
                        orow[k] = p[k]
                    except:
                        self.log.info(
                            "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                        pass
                photData.append(orow)

    self.log.info('completed the ``_parse_photometry_data`` method')
    return photData, relatedFilesTable
def _parse_related_files(
self,
content):
"""*parse the contents for related files URLs and comments*
**Key Arguments:**
- ``content`` -- the content to parse.
**Return:**
- ``relatedFiles`` -- a list of dictionaries of transient related files
"""
self.log.info('starting the ``_parse_related_files`` method')
relatedFilesList = re.finditer(
r"""<td class="cell-filename">.*?href="(?P<filepath>[^"]*).*?remarks">(?P<fileComment>[^<]*)""",
content,
flags=0 # re.S
)
relatedFiles = []
for f in relatedFilesList:
f = f.groupdict()
relatedFiles.append(f)
self.log.info('completed the ``_parse_related_files`` method')
return relatedFiles
def _parse_spectral_data(
        self,
        content,
        TNSId):
    """*parse spectra data from a row in the tns results content*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page
        - ``TNSId`` -- the tns id of the transient

    **Return:**
        - ``specData`` -- a list of dictionaries of the spectral data
        - ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files
    """
    self.log.info('starting the ``_parse_spectral_data`` method')

    specData = []
    relatedFilesTable = []

    # CLASSIFICATION BLOCK - EVERYTHING FROM THE "Classification reportings"
    # HEADER TO THE END OF THE CONTENT
    classBlock = re.search(
        r"""<tr class=[^\n]*?Classification reportings.*$""",
        content,
        flags=re.S  # re.S
    )

    if classBlock:
        classBlock = classBlock.group()
        # ONE MATCH PER CLASSIFICATION REPORT WITHIN THE BLOCK
        reports = re.finditer(
            r"""<tr class="row-[^"]*"><td class="cell-id">.*?</tbody>\s*</table>\s*</div></td> </tr>\s*</tbody>\s*</table>\s*</div></td> </tr>""",
            classBlock,
            flags=re.S  #
        )
        relatedFiles = self._parse_related_files(classBlock)

        for r in reports:
            # REPORT-LEVEL METADATA SHARED BY ALL SPECTRA IN THIS REPORT
            header = re.search(
                r"""<tr class="row.*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<TNSuser>[^<]*).*?classifier_name">(?P<reporters>[^<]*).*?source_group_name">(?P<survey>[^<]*).*?-type">(?P<specType>[^<]*).*?-redshift">(?P<transRedshift>[^<]*).*?-related_files">(?P<relatedFiles>[^<]*).*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<sourceComment>[^<]*)</td>""",
                r.group(),
                flags=re.S  # re.S
            )
            # SKIP REPORTS WHOSE HEADER CANNOT BE PARSED
            if not header:
                continue
            header = header.groupdict()
            header["TNSId"] = TNSId
            # DROP REPORT FIELDS THAT DUPLICATE SOURCE-LEVEL DATA
            del header["reporters"]
            del header["surveyGroup"]
            del header["survey"]

            if not self.comments:
                del header['sourceComment']
            else:
                # FLATTEN MULTI-LINE COMMENTS TO A SINGLE TRUNCATED LINE
                theseComments = header[
                    "sourceComment"].split("\n")
                header["sourceComment"] = ""
                for c in theseComments:
                    header["sourceComment"] += " " + c.strip()
                header["sourceComment"] = header[
                    "sourceComment"].strip().replace('"', "'")[0:750]

            # INDIVIDUAL SPECTRUM ROWS WITHIN THIS REPORT (ascii AND fits
            # FILE LINKS ARE OPTIONAL)
            spec = re.finditer(
                r"""<tr class="class-results-.*?-obsdate">(?P<obsdate>[^<]*).*?-tel_inst">(?P<telescope>[^<]*).*?-exptime">(?P<exptime>[^<]*).*?-observer">(?P<sender>[^<]*).*?-reducer">(?P<reducer>[^<]*).*?-source_group_name">(?P<survey>[^<]*).*?-asciifile">(.*?<a href="(?P<filepath>[^"]*)".*?</a>)?.*?-fitsfile">(.*?<a href="(?P<fitsFilepath>[^"]*)".*?</a>)?.*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )

            # ONLY ATTACH THE REPORT'S RELATED FILES TO THE FIRST SPECTRUM
            # ROW THAT REFERENCES THEM
            filesAppended = False
            for s in spec:
                s = s.groupdict()
                del s["sender"]
                del s["surveyGroup"]
                del s["reducer"]

                if not self.comments:
                    del s["remarks"]
                else:
                    s["remarks"] = s["remarks"].replace('"', "'")[0:750]

                s.update(header)

                if s["relatedFiles"] and filesAppended == False:
                    filesAppended = True
                    for f in relatedFiles:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = f[
                            "filepath"].split("/")[-1]
                        thisFile["url"] = f["filepath"]
                        if self.comments:
                            thisFile["comment"] = f[
                                "fileComment"].replace("\n", " ").strip()
                        thisFile["dateObs"] = s["obsdate"]
                        # 1 == SPECTRUM-RELATED FILE (2 == PHOTOMETRY)
                        thisFile["spec1phot2"] = 1
                        relatedFilesTable.append(thisFile)

                # ALSO RECORD THE SPECTRUM'S OWN ascii/fits FILES
                for ffile in [s["filepath"], s["fitsFilepath"]]:
                    if ffile:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = ffile.split(
                            "/")[-1]
                        thisFile["url"] = ffile
                        if self.comments:
                            thisFile["comment"] = ""
                        thisFile["dateObs"] = s["obsdate"]
                        thisFile["spec1phot2"] = 1
                        relatedFilesTable.append(thisFile)
                del s["filepath"]
                del s["fitsFilepath"]
                del s["relatedFiles"]

                # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                orow = collections.OrderedDict()
                keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
                            "telescope", "exptime", "reportAddedDate", "TNSuser"]
                for k, v in s.iteritems():
                    if k not in keyOrder:
                        keyOrder.append(k)
                for k in keyOrder:
                    try:
                        orow[k] = s[k]
                    except:
                        self.log.info(
                            "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                        pass
                specData.append(orow)

    self.log.info('completed the ``_parse_spectral_data`` method')
    return specData, relatedFilesTable
|
thespacedoctor/transientNamer
|
transientNamer/search.py
|
search.yaml
|
python
|
def yaml(
        self,
        dirPath=None):
    """*Render the four result sets in yaml format*

    **Key Arguments:**
        - ``dirPath`` -- directory to also write the rendered yaml files to. Default *None* (render only).

    **Return:**
        - ``yamlSources`` -- the top-level transient data
        - ``yamlPhot`` -- all photometry associated with the transients
        - ``yamlSpec`` -- all spectral data associated with the transients
        - ``yamlFiles`` -- all files associated with the matched transients

    **Usage:**

    .. code-block:: python

        yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml()
    """
    # RENDER EACH RESULT SET IN TURN, OPTIONALLY WRITING IT TO DISK
    resultSets = [
        (self.sourceResults, "sources.yaml"),
        (self.photResults, "phot.yaml"),
        (self.specResults, "spec.yaml"),
        (self.relatedFilesResults, "relatedFiles.yaml")
    ]
    if dirPath:
        prefix = self._file_prefix()
        rendered = [rs.yaml(filepath=dirPath + "/" + prefix + suffix)
                    for rs, suffix in resultSets]
    else:
        rendered = [rs.yaml() for rs, suffix in resultSets]
    yamlSources, yamlPhot, yamlSpec, yamlFiles = rendered
    return yamlSources, yamlPhot, yamlSpec, yamlFiles
|
*Render the results in yaml format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `yamlSources` -- the top-level transient data
- `yamlPhot` -- all photometry associated with the transients
- `yamlSpec` -- all spectral data associated with the transients
- `yamlFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in yaml format:
.. code-block:: python
yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml()
print yamlSources
.. code-block:: text
- TNSId: 2016asf
TNSName: SN2016asf
decDeg: 31.1126
decSex: '+31:06:45.36'
discDate: '2016-03-06 08:09:36'
discMag: '17.1'
discMagFilter: V-Johnson
discSurvey: ASAS-SN
discoveryName: ASASSN-16cs
hostName: KUG 0647+311
hostRedshift: null
objectUrl: http://wis-tns.weizmann.ac.il/object/2016asf
raDeg: 102.65304166666667
raSex: '06:50:36.73'
separationArcsec: '0.66'
separationEastArcsec: '-0.13'
separationNorthArcsec: '0.65'
specType: SN Ia
transRedshift: '0.021'
You can save the results to file by passing in a directory path within which to save the files. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be associated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.yaml("~/tns")
.. image:: https://i.imgur.com/ZpJIC6p.png
:width: 800px
:alt: yaml output
|
train
|
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L359-L430
|
[
"def _file_prefix(\n self):\n \"\"\"*Generate a file prefix based on the type of search for saving files to disk*\n\n **Return:**\n - ``prefix`` -- the file prefix\n \"\"\"\n self.log.info('starting the ``_file_prefix`` method')\n\n if self.ra:\n now = datetime.now()\n prefix = now.strftime(\"%Y%m%dt%H%M%S%f_tns_conesearch_\")\n elif self.name:\n prefix = self.name + \"_tns_conesearch_\"\n elif self.internal_name:\n prefix = self.internal_name + \"_tns_conesearch_\"\n elif self.discInLastDays:\n discInLastDays = str(self.discInLastDays)\n now = datetime.now()\n prefix = now.strftime(\n discInLastDays + \"d_since_%Y%m%d_tns_conesearch_\")\n\n self.log.info('completed the ``_file_prefix`` method')\n return prefix\n"
] |
class search():
"""
*The worker class for the transient namer search module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``ra`` -- RA of the location being checked
- ``dec`` -- DEC of the location being searched
- ``radiusArcsec`` - the radius of the conesearch to perform against the TNS
- ``name`` -- name of the object to search the TNS for
- ``discInLastDays`` -- search the TNS for transient discovered in the last X days
- ``comments`` -- print the comments from the TNS, note these can be long making table outputs somewhat unreadable. Default *False*
**Usage:**
To initiate a search object to search the TNS via an object name (either TNS or survey names accepted):
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
name="Gaia16bbi"
)
or for a conesearch use something similar to:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
ra="06:50:36.74",
dec="+31:06:44.7",
radiusArcsec=5
)
Note the search method can accept coordinates in sexagesimal or decimal degree formats.
To list all new objects discovered in the last three weeks, then use:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
discInLastDays=21
)
"""
# Initialisation
def __init__(
        self,
        log,
        ra="",
        dec="",
        radiusArcsec="",
        name="",
        discInLastDays="",
        settings=False,
        comments=False
):
    """*Initialise the search object and immediately run the TNS query.*

    NOTE(review): the constructor performs network I/O (via ``_query_tns``)
    before returning -- instantiation can be slow or raise on network failure.
    """
    self.log = log
    log.debug("instansiating a new 'search' object")
    self.settings = settings
    self.ra = ra
    self.dec = dec
    self.radiusArcsec = radiusArcsec
    self.comments = comments
    self.name = name
    self.internal_name = ""
    self.discInLastDays = discInLastDays
    # TNS PAGINATION STATE: `page` IS THE CURRENT RESULTS PAGE, `batchSize`
    # THE NUMBER OF ROWS REQUESTED PER PAGE
    self.page = 0
    self.batchSize = 1000
    # CREATE THE TIME-RANGE WINDOW TO SEARCH TNS
    if not discInLastDays:
        self.start = ""
        self.end = ""
    else:
        discInLastDays = int(discInLastDays)
        # END THE WINDOW TOMORROW SO TODAY'S DISCOVERIES ARE INCLUDED
        td = timedelta(days=1)
        end = datetime.now() + td
        self.end = end.strftime("%Y-%m-%d")
        td = timedelta(days=discInLastDays)
        start = datetime.now() - td
        self.start = start.strftime("%Y-%m-%d")
    # DETERMINE IF WE HAVE A TNS OR INTERAL SURVEY NAME
    # (TNS names look like "SN2016asf"/"AT 2016asf"/"2016asf"; anything else
    # is treated as a survey-internal name)
    if self.name:
        matchObject = re.match(r'^((SN|AT) ?)?(\d{4}\w{1,6})', self.name)
        if matchObject:
            self.name = matchObject.group(3)
        else:
            self.internal_name = self.name
            self.name = ""
    # DO THE SEARCH OF THE TNS AND COMPILE THE RESULTS INTO SEPARATE RESULT
    # SETS
    self.sourceResultsList, self.photResultsList, self.specResultsList, self.relatedFilesResultsList = self._query_tns()
    # WRAP EACH RAW LIST IN A RENDERABLE RESULT-SET OBJECT (csv/json/... views)
    self.sourceResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.sourceResultsList
    )
    self.photResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.photResultsList
    )
    self.specResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.specResultsList
    )
    self.relatedFilesResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.relatedFilesResultsList
    )

    return None
@property
def sources(
        self):
    """*The matched transient sources as a list of plain dictionaries*

    **Usage:**

    .. code-block:: python

        sources = tns.sources
    """
    # RETURN COPIES SO CALLERS CANNOT MUTATE THE CACHED RESULT ROWS
    return [dict(row) for row in self.sourceResultsList]
@property
def spectra(
        self):
    """*The spectral data associated with the matched sources*

    **Usage:**

    .. code-block:: python

        sourceSpectra = tns.spectra
    """
    # RETURN COPIES SO CALLERS CANNOT MUTATE THE CACHED RESULT ROWS
    return [dict(row) for row in self.specResultsList]
@property
def files(
        self):
    """*The files associated with the matched sources*

    **Usage:**

    .. code-block:: python

        sourceFiles = tns.files
    """
    # RETURN COPIES SO CALLERS CANNOT MUTATE THE CACHED RESULT ROWS
    return [dict(row) for row in self.relatedFilesResultsList]
@property
def photometry(
        self):
    """*The photometry associated with the matched sources*

    **Usage:**

    .. code-block:: python

        sourcePhotometry = tns.photometry
    """
    # RETURN COPIES SO CALLERS CANNOT MUTATE THE CACHED RESULT ROWS
    return [dict(row) for row in self.photResultsList]
@property
def url(
        self):
    """*The generated URL used for searching of the TNS*

    **Usage:**

    .. code-block:: python

        searchURL = tns.url
    """
    # `_searchURL` is set as a side effect of `_query_tns` (the URL of the
    # last results page requested)
    return self._searchURL
def csv(
        self,
        dirPath=None):
    """*Render the four result sets in csv format*

    **Key Arguments:**
        - ``dirPath`` -- directory to also write the rendered csv files to. Default *None* (render only).

    **Return:**
        - ``csvSources`` -- the top-level transient data
        - ``csvPhot`` -- all photometry associated with the transients
        - ``csvSpec`` -- all spectral data associated with the transients
        - ``csvFiles`` -- all files associated with the matched transients

    **Usage:**

    .. code-block:: python

        csvSources, csvPhot, csvSpec, csvFiles = tns.csv()

    All four flavours of data are keyed back to their transient via the
    unique `TNSId` column.
    """
    # RENDER EACH RESULT SET IN TURN, OPTIONALLY WRITING IT TO DISK
    resultSets = [
        (self.sourceResults, "sources.csv"),
        (self.photResults, "phot.csv"),
        (self.specResults, "spec.csv"),
        (self.relatedFilesResults, "relatedFiles.csv")
    ]
    if dirPath:
        prefix = self._file_prefix()
        rendered = [rs.csv(filepath=dirPath + "/" + prefix + suffix)
                    for rs, suffix in resultSets]
    else:
        rendered = [rs.csv() for rs, suffix in resultSets]
    csvSources, csvPhot, csvSpec, csvFiles = rendered
    return csvSources, csvPhot, csvSpec, csvFiles
def json(
        self,
        dirPath=None):
    """*Render the four result sets in json format*

    **Key Arguments:**
        - ``dirPath`` -- directory to also write the rendered json files to. Default *None* (render only).

    **Return:**
        - ``jsonSources`` -- the top-level transient data
        - ``jsonPhot`` -- all photometry associated with the transients
        - ``jsonSpec`` -- all spectral data associated with the transients
        - ``jsonFiles`` -- all files associated with the matched transients

    **Usage:**

    .. code-block:: python

        jsonSources, jsonPhot, jsonSpec, jsonFiles = tns.json()

    All four flavours of data are keyed back to their transient via the
    unique `TNSId` field.
    """
    # RENDER EACH RESULT SET IN TURN, OPTIONALLY WRITING IT TO DISK
    resultSets = [
        (self.sourceResults, "sources.json"),
        (self.photResults, "phot.json"),
        (self.specResults, "spec.json"),
        (self.relatedFilesResults, "relatedFiles.json")
    ]
    if dirPath:
        prefix = self._file_prefix()
        rendered = [rs.json(filepath=dirPath + "/" + prefix + suffix)
                    for rs, suffix in resultSets]
    else:
        rendered = [rs.json() for rs, suffix in resultSets]
    jsonSources, jsonPhot, jsonSpec, jsonFiles = rendered
    return jsonSources, jsonPhot, jsonSpec, jsonFiles
def markdown(
        self,
        dirPath=None):
    """*Render the four result sets as markdown tables*

    **Key Arguments:**
        - ``dirPath`` -- directory to also write the rendered markdown files to. Default *None* (render only).

    **Return:**
        - ``markdownSources`` -- the top-level transient data
        - ``markdownPhot`` -- all photometry associated with the transients
        - ``markdownSpec`` -- all spectral data associated with the transients
        - ``markdownFiles`` -- all files associated with the matched transients

    **Usage:**

    .. code-block:: python

        markdownSources, markdownPhot, markdownSpec, markdownFiles = tns.markdown()

    All four flavours of data are keyed back to their transient via the
    unique `TNSId` column.
    """
    # RENDER EACH RESULT SET IN TURN, OPTIONALLY WRITING IT TO DISK
    resultSets = [
        (self.sourceResults, "sources.md"),
        (self.photResults, "phot.md"),
        (self.specResults, "spec.md"),
        (self.relatedFilesResults, "relatedFiles.md")
    ]
    if dirPath:
        prefix = self._file_prefix()
        rendered = [rs.markdown(filepath=dirPath + "/" + prefix + suffix)
                    for rs, suffix in resultSets]
    else:
        rendered = [rs.markdown() for rs, suffix in resultSets]
    markdownSources, markdownPhot, markdownSpec, markdownFiles = rendered
    return markdownSources, markdownPhot, markdownSpec, markdownFiles
def table(
        self,
        dirPath=None):
    """*Render the four result sets as ascii tables*

    **Key Arguments:**
        - ``dirPath`` -- directory to also write the rendered ascii files to. Default *None* (render only).

    **Return:**
        - ``tableSources`` -- the top-level transient data
        - ``tablePhot`` -- all photometry associated with the transients
        - ``tableSpec`` -- all spectral data associated with the transients
        - ``tableFiles`` -- all files associated with the matched transients

    **Usage:**

    .. code-block:: python

        tableSources, tablePhot, tableSpec, tableFiles = tns.table()

    All four flavours of data are keyed back to their transient via the
    unique `TNSId` column.
    """
    # RENDER EACH RESULT SET IN TURN, OPTIONALLY WRITING IT TO DISK
    resultSets = [
        (self.sourceResults, "sources.ascii"),
        (self.photResults, "phot.ascii"),
        (self.specResults, "spec.ascii"),
        (self.relatedFilesResults, "relatedFiles.ascii")
    ]
    if dirPath:
        prefix = self._file_prefix()
        rendered = [rs.table(filepath=dirPath + "/" + prefix + suffix)
                    for rs, suffix in resultSets]
    else:
        rendered = [rs.table() for rs, suffix in resultSets]
    tableSources, tablePhot, tableSpec, tableFiles = rendered
    return tableSources, tablePhot, tableSpec, tableFiles
def mysql(
        self,
        tableNamePrefix="TNS",
        dirPath=None):
    """*Render the results as MySQL Insert statements*

    **Key Arguments:**
        - ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `mysqlSources` -- the top-level transient data
        - `mysqlPhot` -- all photometry associated with the transients
        - `mysqlSpec` -- all spectral data associated with the transients
        - `mysqlFiles` -- all files associated with the matched transients found on the tns

    **Usage:**

    To render the results in mysql insert format:

    .. code-block:: python

        mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")
        print mysqlSources

    You can save the results to file by passing in a directory path within
    which to save the files. The four flavours of data (sources, photometry,
    spectra and files) are saved to separate files but all data can be
    associated with its transient source using the transient's unique
    `TNSId`.

    .. code-block:: python

        tns.mysql("TNS", "~/tns")
    """
    if dirPath:
        # WRITING TO DISK: EACH RESULT SET GETS ITS OWN .sql FILE, AND EACH
        # FILE IS SEEDED WITH A CREATE TABLE STATEMENT SO THE INSERTS CAN BE
        # REPLAYED INTO AN EMPTY DATABASE
        p = self._file_prefix()
        # NOTE: the `%(tableNamePrefix)s` placeholders below are filled in
        # via `% locals()` at the end of each string
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_sources` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(20) NOT NULL,
  `TNSName` varchar(20) DEFAULT NULL,
  `dateCreated` datetime DEFAULT NULL,
  `decDeg` double DEFAULT NULL,
  `decSex` varchar(45) DEFAULT NULL,
  `discDate` datetime DEFAULT NULL,
  `discMag` double DEFAULT NULL,
  `discMagFilter` varchar(45) DEFAULT NULL,
  `discSurvey` varchar(100) DEFAULT NULL,
  `discoveryName` varchar(100) DEFAULT NULL,
  `objectUrl` varchar(200) DEFAULT NULL,
  `raDeg` double DEFAULT NULL,
  `raSex` varchar(45) DEFAULT NULL,
  `specType` varchar(100) DEFAULT NULL,
  `transRedshift` double DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `hostName` VARCHAR(100) NULL DEFAULT NULL,
  `hostRedshift` DOUBLE NULL DEFAULT NULL,
  `survey` VARCHAR(100) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid` (`TNSId`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources", filepath=dirPath + "/" + p + "sources.sql", createStatement=createStatement)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_photometry` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(20) NOT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `exptime` double DEFAULT NULL,
  `filter` varchar(100) DEFAULT NULL,
  `limitingMag` tinyint(4) DEFAULT NULL,
  `mag` double DEFAULT NULL,
  `magErr` double DEFAULT NULL,
  `magUnit` varchar(100) DEFAULT NULL,
  `objectName` varchar(100) DEFAULT NULL,
  `obsdate` datetime DEFAULT NULL,
  `reportAddedDate` datetime DEFAULT NULL,
  `suggestedType` varchar(100) DEFAULT NULL,
  `survey` varchar(100) DEFAULT NULL,
  `telescope` varchar(100) DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `remarks` VARCHAR(800) NULL DEFAULT NULL,
  `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
  UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),
  UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlPhot = self.photResults.mysql(
            tableNamePrefix + "_photometry", filepath=dirPath + "/" + p + "phot.sql", createStatement=createStatement)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_spectra` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(45) NOT NULL,
  `TNSuser` varchar(45) DEFAULT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `exptime` double DEFAULT NULL,
  `obsdate` datetime DEFAULT NULL,
  `reportAddedDate` datetime DEFAULT NULL,
  `specType` varchar(100) DEFAULT NULL,
  `survey` varchar(100) DEFAULT NULL,
  `telescope` varchar(100) DEFAULT NULL,
  `transRedshift` double DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `remarks` VARCHAR(800) NULL DEFAULT NULL,
  `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
  UNIQUE KEY `u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlSpec = self.specResults.mysql(
            tableNamePrefix + "_spectra", filepath=dirPath + "/" + p + "spec.sql", createStatement=createStatement)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_files` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(100) NOT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `dateObs` datetime DEFAULT NULL,
  `filename` varchar(200) DEFAULT NULL,
  `spec1phot2` tinyint(4) DEFAULT NULL,
  `url` varchar(800) DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `comment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid_url` (`TNSId`,`url`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files", filepath=dirPath + "/" + p + "relatedFiles.sql", createStatement=createStatement)
    else:
        # RENDER ONLY - NO CREATE TABLE STATEMENTS, NO FILES WRITTEN
        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources")
        mysqlPhot = self.photResults.mysql(tableNamePrefix + "_photometry")
        mysqlSpec = self.specResults.mysql(tableNamePrefix + "_spectra")
        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files")

    return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles
def _query_tns(self):
"""
*determine how to query the TNS, send query and parse the results*
**Return:**
- ``results`` -- a list of dictionaries (one dictionary for each result set returned from the TNS)
"""
self.log.info('starting the ``get`` method')
sourceTable = []
photoTable = []
specTable = []
relatedFilesTable = []
# THIS stop IS TO KEEP TRACK OF THE TNS PAGINATION IF MANY RESULT PAGES
# ARE RETURNED
stop = False
sourceCount = 0
while not stop:
status_code, content, self._searchURL = self._get_tns_search_results()
if status_code != 200:
self.log.error(
'cound not get the search reuslts from the TNS, HTML error code %(status_code)s ' % locals())
return None
if "No results found" in content:
print "No results found"
return sourceTable, photoTable, specTable, relatedFilesTable
if self._parse_transient_rows(content, True) < self.batchSize:
stop = True
else:
self.page += 1
thisPage = self.page
print "Downloaded %(thisPage)s page(s) from the TNS. %(sourceCount)s transients parsed so far." % locals()
sourceCount += self.batchSize
print "\t" + self._searchURL
timesleep.sleep(1)
# PARSE ALL ROWS RETURNED
for transientRow in self._parse_transient_rows(content):
# TOP LEVEL DISCOVERY CONTENT
sourceContent = transientRow.group()
discInfo, TNSId = self._parse_discovery_information(
sourceContent)
sourceTable.append(discInfo)
# PHOTOMETERY
phot, relatedFiles = self._parse_photometry_data(
sourceContent, TNSId)
photoTable += phot
relatedFilesTable += relatedFiles
# SPECTRA
spec, relatedFiles = self._parse_spectral_data(
sourceContent, TNSId)
specTable += spec
relatedFilesTable += relatedFiles
# SORT BY SEPARATION FROM THE SEARCH COORDINATES
try:
sourceTable = sorted(sourceTable, key=itemgetter(
'separationArcsec'), reverse=False)
except:
pass
self.log.info('completed the ``get`` method')
return sourceTable, photoTable, specTable, relatedFilesTable
def _get_tns_search_results(
        self):
    """
    *Query the TNS search page and return the raw response*

    **Return:**
        - ``status_code`` -- the HTTP status code of the response
        - ``content`` -- the raw page content
        - ``url`` -- the fully-resolved search URL

    **Raises:**
        - ``requests.exceptions.RequestException`` -- if the HTTP request
          itself fails (connection error, timeout, ...)
    """
    self.log.info('starting the ``_get_tns_search_results`` method')

    try:
        response = requests.get(
            url="http://wis-tns.weizmann.ac.il/search",
            params={
                "page": self.page,
                "ra": self.ra,
                "decl": self.dec,
                "radius": self.radiusArcsec,
                "name": self.name,
                "internal_name": self.internal_name,
                "date_start[date]": self.start,
                "date_end[date]": self.end,
                "num_page": self.batchSize,
                "display[redshift]": "1",
                "display[hostname]": "1",
                "display[host_redshift]": "1",
                "display[source_group_name]": "1",
                "display[internal_name]": "1",
                "display[spectra_count]": "1",
                "display[discoverymag]": "1",
                "display[discmagfilter]": "1",
                "display[discoverydate]": "1",
                "display[discoverer]": "1",
                "display[sources]": "1",
                "display[bibcode]": "1",
            },
        )
    except requests.exceptions.RequestException:
        # BUGFIX: previously execution fell through to the return statement
        # with `response` unbound, masking the real failure with a
        # NameError -- report and re-raise instead
        print('HTTP Request failed')
        raise

    self.log.info('completed the ``_get_tns_search_results`` method')
    return response.status_code, response.content, response.url
def _file_prefix(
self):
"""*Generate a file prefix based on the type of search for saving files to disk*
**Return:**
- ``prefix`` -- the file prefix
"""
self.log.info('starting the ``_file_prefix`` method')
if self.ra:
now = datetime.now()
prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_conesearch_")
elif self.name:
prefix = self.name + "_tns_conesearch_"
elif self.internal_name:
prefix = self.internal_name + "_tns_conesearch_"
elif self.discInLastDays:
discInLastDays = str(self.discInLastDays)
now = datetime.now()
prefix = now.strftime(
discInLastDays + "d_since_%Y%m%d_tns_conesearch_")
self.log.info('completed the ``_file_prefix`` method')
return prefix
def _parse_transient_rows(
self,
content,
count=False):
"""* parse transient rows from the TNS result page content*
**Key Arguments:**
- ``content`` -- the content from the TNS results page.
- ``count`` -- return only the number of rows
**Return:**
- ``transientRows``
"""
self.log.info('starting the ``_parse_transient_rows`` method')
regexForRow = r"""\n([^\n]*?<a href="/object/.*?)(?=\n[^\n]*?<a href="/object/|<\!\-\- /\.section, /#content \-\->)"""
if count:
# A SINGLE SOURCE BLOCK
matchedSources = re.findall(
regexForRow,
content,
flags=re.S # re.S
)
return len(matchedSources)
# A SINGLE SOURCE BLOCK
matchedSources = re.finditer(
regexForRow,
content,
flags=re.S # re.S
)
self.log.info('completed the ``_parse_transient_rows`` method')
return matchedSources
def _parse_discovery_information(
        self,
        content):
    """* parse discovery information from one row on the TNS results page*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page.

    **Return:**
        - ``discoveryData`` -- an ordered dictionary of results for the row
        - ``TNSId`` -- the unique TNS id for the transient
    """
    self.log.info('starting the ``_parse_discovery_information`` method')

    # ASTROCALC UNIT CONVERTER OBJECT
    converter = unit_conversion(
        log=self.log
    )

    matches = re.finditer(
        r"""<tr class="row-.*?"><td class="cell-id">(?P<tnsId>\d*?)</td><td class="cell-name"><a href="(?P<objectUrl>.*?)">(?P<TNSName>.*?)</a></td><td class="cell-.*?<td class="cell-ra">(?P<raSex>.*?)</td><td class="cell-decl">(?P<decSex>.*?)</td><td class="cell-ot_name">(?P<specType>.*?)</td><td class="cell-redshift">(?P<transRedshift>.*?)</td><td class="cell-hostname">(?P<hostName>.*?)</td><td class="cell-host_redshift">(?P<hostRedshift>.*?)</td><td class="cell-source_group_name">(?P<discSurvey>.*?)</td>.*?<td class="cell-internal_name">(<a.*?>)?(?P<discoveryName>.*?)(</a>)?</td>.*?<td class="cell-discoverymag">(?P<discMag>.*?)</td><td class="cell-disc_filter_name">(?P<discMagFilter>.*?)</td><td class="cell-discoverydate">(?P<discDate>.*?)</td><td class="cell-discoverer">(?P<sender>.*?)</td>.*?</tr>""",
        content,
        flags=0  # re.S
    )

    discoveryData = []
    for match in matches:
        row = match.groupdict()
        for k, v in row.items():  # BUGFIX: iteritems() is Python-2 only
            row[k] = v.strip()
            # BUGFIX: previously tested the *unstripped* value, so
            # whitespace-only cells slipped through as "" instead of None
            if len(row[k]) == 0:
                row[k] = None
        # BUGFIX: the cell value is a string, so the old `== 0` (int)
        # comparison could never be true
        if row["transRedshift"] in (0, "0"):
            row["transRedshift"] = None
        if row["TNSName"][0] in ["1", "2"]:
            row["TNSName"] = "SN" + row["TNSName"]
        row["objectUrl"] = "http://wis-tns.weizmann.ac.il" + \
            row["objectUrl"]

        # CONVERT COORDINATES TO DECIMAL DEGREES
        row["raDeg"] = converter.ra_sexegesimal_to_decimal(
            ra=row["raSex"]
        )
        row["decDeg"] = converter.dec_sexegesimal_to_decimal(
            dec=row["decSex"]
        )

        # IF THIS IS A COORDINATE SEARCH, ADD SEPARATION FROM
        # ORIGINAL QUERY COORDINATES
        if self.ra:
            # CALCULATE SEPARATION IN ARCSEC
            from astrocalc.coords import separations
            calculator = separations(
                log=self.log,
                ra1=self.ra,
                dec1=self.dec,
                ra2=row["raDeg"],
                dec2=row["decDeg"],
            )
            angularSeparation, north, east = calculator.get()

            row["separationArcsec"] = angularSeparation
            row["separationNorthArcsec"] = north
            row["separationEastArcsec"] = east

        if not row["discSurvey"]:
            row["survey"] = row["sender"]
        del row["sender"]
        del row["tnsId"]
        row["TNSName"] = row["TNSName"].replace(" ", "")
        row["TNSId"] = row["TNSName"].replace(
            "SN", "").replace("AT", "")
        TNSId = row["TNSId"]

        # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
        orow = collections.OrderedDict()
        keyOrder = ["TNSId", "TNSName", "discoveryName", "discSurvey", "raSex", "decSex", "raDeg", "decDeg",
                    "transRedshift", "specType", "discMag", "discMagFilter", "discDate", "objectUrl", "hostName", "hostRedshift", "separationArcsec", "separationNorthArcsec", "separationEastArcsec"]
        for k in row:
            if k not in keyOrder:
                keyOrder.append(k)
        for k in keyOrder:
            try:
                orow[k] = row[k]
            except KeyError:
                self.log.info(
                    "`%(k)s` not found in the source data for %(TNSId)s" % locals())
        # BUGFIX: previously the unordered `row` was appended, silently
        # discarding the ordered dictionary built above
        discoveryData.append(orow)

    self.log.info('completed the ``_parse_discovery_information`` method')
    return discoveryData[0], TNSId
    def _parse_photometry_data(
            self,
            content,
            TNSId):
        """*parse photometry data from a row in the tns results content*

        Scans the "AT reportings" HTML block of a single transient's results
        row, extracting one dictionary per photometry point plus any related
        files attached to the reports.

        **Key Arguments:**
            - ``content`` -- a table row from the TNS results page
            - ``TNSId`` -- the tns id of the transient

        **Return:**
            - ``photData`` -- a list of dictionaries of the photometry data
            - ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files
        """
        self.log.info('starting the ``_parse_photometry_data`` method')

        photData = []
        relatedFilesTable = []

        # AT REPORT BLOCK -- EVERYTHING UP TO (BUT EXCLUDING) THE
        # CLASSIFICATION REPORTINGS SECTION, OR TO THE END OF THE CONTENT
        ATBlock = re.search(
            r"""<tr class=[^\n]*?AT reportings.*?(?=<tr class=[^\n]*?Classification reportings|$)""",
            content,
            flags=re.S  # re.S
        )

        if ATBlock:
            ATBlock = ATBlock.group()
            # ONE MATCH PER INDIVIDUAL AT REPORT TABLE
            reports = re.finditer(
                r"""<tr class="row-[^"]*"><td class="cell-id">.*?</table>""",
                ATBlock,
                flags=re.S  # re.S
            )
            relatedFiles = self._parse_related_files(ATBlock)

            for r in reports:
                # REPORT-LEVEL (HEADER) METADATA, SHARED BY EVERY PHOTOMETRY
                # POINT IN THIS REPORT
                header = re.search(
                    r"""<tr class="row[^"]*".*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<sender>[^<]*).*?reporter_name">(?P<reporters>[^<]*).*?source_group_name">(?P<surveyGroup>[^<]*).*?ra">(?P<ra>[^<]*).*?decl">(?P<dec>[^<]*).*?discovery_date">(?P<obsDate>[^<]*).*?flux">(?P<mag>[^<]*).*?filter_name">(?P<magFilter>[^<]*).*?related_files">(?P<relatedFiles>[^<]*).*?type_name">(?P<suggestedType>[^<]*).*?hostname">(?P<hostName>[^<]*).*?host_redshift">(?P<hostRedshift>[^<]*).*?internal_name">(?P<objectName>[^<]*).*?groups">(?P<survey>[^<]*).*?remarks">(?P<sourceComment>[^<]*)""",
                    r.group(),
                    flags=0  # re.S
                )
                try:
                    header = header.groupdict()
                except:
                    # NOTE(review): if the regex failed to match, ``header`` is
                    # None here and the next line will raise TypeError --
                    # presumably the pattern always matches in practice; confirm.
                    print r.group()

                header["TNSId"] = TNSId
                # DROP HEADER FIELDS DUPLICATED AT THE PHOTOMETRY-POINT LEVEL
                del header["reporters"]
                del header["surveyGroup"]
                del header["hostName"]
                del header["hostRedshift"]
                del header["mag"]
                del header["magFilter"]
                del header["obsDate"]
                del header["ra"]
                del header["dec"]
                if not self.comments:
                    del header['sourceComment']
                else:
                    # COLLAPSE MULTI-LINE COMMENT TO A SINGLE LINE, SWAP DOUBLE
                    # FOR SINGLE QUOTES AND TRUNCATE TO 750 CHARACTERS
                    theseComments = header[
                        "sourceComment"].split("\n")
                    header["sourceComment"] = ""
                    for c in theseComments:
                        header["sourceComment"] += " " + c.strip()
                    header["sourceComment"] = header[
                        "sourceComment"].strip().replace('"', "'")[0:750]

                # ONE MATCH PER PHOTOMETRY POINT IN THIS REPORT
                phot = re.finditer(
                    r"""<tr class="row\-[^"]*".*?obsdate">(?P<obsdate>[^<]*).*?flux">(?P<mag>[^<]*).*?fluxerr">(?P<magErr>[^<]*).*?limflux">(?P<limitingMag>[^<]*).*?unit_name">(?P<magUnit>[^<]*).*?filter_name">(?P<filter>[^<]*).*?tel_inst">(?P<telescope>[^<]*).*?exptime">(?P<exptime>[^<]*).*?observer">(?P<observer>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                    r.group(),
                    flags=0  # re.S
                )
                # ONLY ATTACH THE REPORT'S RELATED FILES TO THE FIRST
                # PHOTOMETRY POINT THAT REFERENCES THEM
                filesAppended = False
                for p in phot:
                    p = p.groupdict()
                    del p["observer"]
                    # A NON-DETECTION ROW HAS A LIMITING FLUX BUT NO FLUX --
                    # REUSE THE LIMIT AS THE MAG AND FLAG limitingMag = 1
                    if p["limitingMag"] and not p["mag"]:
                        p["mag"] = p["limitingMag"]
                        p["limitingMag"] = 1
                        p["remarks"] = p["remarks"].replace(
                            "[Last non detection]", "")
                    else:
                        p["limitingMag"] = 0
                    if not self.comments:
                        del p["remarks"]
                    # MERGE THE REPORT-LEVEL METADATA INTO THIS POINT
                    p.update(header)

                    if p["relatedFiles"] and filesAppended == False:
                        filesAppended = True
                        for f in relatedFiles:
                            # ORDER THE DICTIONARY FOR THIS ROW OF
                            # RESULTS
                            thisFile = collections.OrderedDict()
                            thisFile["TNSId"] = TNSId
                            thisFile["filename"] = f[
                                "filepath"].split("/")[-1]
                            thisFile["url"] = f["filepath"]
                            if self.comments:
                                thisFile["comment"] = f[
                                    "fileComment"].replace("\n", " ").strip().replace('"', "'")[0:750]
                            thisFile["dateObs"] = p["obsdate"]
                            # spec1phot2 == 2 MARKS A PHOTOMETRY-RELATED FILE
                            thisFile["spec1phot2"] = 2
                            relatedFilesTable.append(thisFile)

                    # FALL BACK TO THE SENDER WHEN NO SURVEY/OBJECT NAME GIVEN
                    if not p["survey"] and not p["objectName"]:
                        p["survey"] = p["sender"]
                    del p["relatedFiles"]
                    del p["sender"]

                    # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                    orow = collections.OrderedDict()
                    keyOrder = ["TNSId", "survey", "obsdate", "filter", "limitingMag", "mag", "magErr",
                                "magUnit", "suggestedType", "telescope", "exptime", "reportAddedDate"]
                    for k, v in p.iteritems():
                        if k not in keyOrder:
                            keyOrder.append(k)
                    for k in keyOrder:
                        try:
                            orow[k] = p[k]
                        except:
                            self.log.info(
                                "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                            pass
                    photData.append(orow)

        self.log.info('completed the ``_parse_photometry_data`` method')
        return photData, relatedFilesTable
def _parse_related_files(
self,
content):
"""*parse the contents for related files URLs and comments*
**Key Arguments:**
- ``content`` -- the content to parse.
**Return:**
- ``relatedFiles`` -- a list of dictionaries of transient related files
"""
self.log.info('starting the ``_parse_related_files`` method')
relatedFilesList = re.finditer(
r"""<td class="cell-filename">.*?href="(?P<filepath>[^"]*).*?remarks">(?P<fileComment>[^<]*)""",
content,
flags=0 # re.S
)
relatedFiles = []
for f in relatedFilesList:
f = f.groupdict()
relatedFiles.append(f)
self.log.info('completed the ``_parse_related_files`` method')
return relatedFiles
    def _parse_spectral_data(
            self,
            content,
            TNSId):
        """*parse spectra data from a row in the tns results content*

        Scans the "Classification reportings" HTML block of a single
        transient's results row, extracting one dictionary per spectrum plus
        any related files (ascii/fits spectra, attachments).

        **Key Arguments:**
            - ``content`` -- a table row from the TNS results page
            - ``TNSId`` -- the tns id of the transient

        **Return:**
            - ``specData`` -- a list of dictionaries of the spectral data
            - ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files
        """
        self.log.info('starting the ``_parse_spectral_data`` method')

        specData = []
        relatedFilesTable = []

        # CLASSIFICATION BLOCK -- FROM THE CLASSIFICATION REPORTINGS HEADER TO
        # THE END OF THE CONTENT
        classBlock = re.search(
            r"""<tr class=[^\n]*?Classification reportings.*$""",
            content,
            flags=re.S  # re.S
        )

        if classBlock:
            classBlock = classBlock.group()
            # ONE MATCH PER INDIVIDUAL CLASSIFICATION REPORT
            reports = re.finditer(
                r"""<tr class="row-[^"]*"><td class="cell-id">.*?</tbody>\s*</table>\s*</div></td> </tr>\s*</tbody>\s*</table>\s*</div></td> </tr>""",
                classBlock,
                flags=re.S  #
            )
            relatedFiles = self._parse_related_files(classBlock)

            for r in reports:
                # REPORT-LEVEL (HEADER) METADATA, SHARED BY EVERY SPECTRUM IN
                # THIS REPORT
                header = re.search(
                    r"""<tr class="row.*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<TNSuser>[^<]*).*?classifier_name">(?P<reporters>[^<]*).*?source_group_name">(?P<survey>[^<]*).*?-type">(?P<specType>[^<]*).*?-redshift">(?P<transRedshift>[^<]*).*?-related_files">(?P<relatedFiles>[^<]*).*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<sourceComment>[^<]*)</td>""",
                    r.group(),
                    flags=re.S  # re.S
                )
                # SKIP REPORTS THAT DO NOT MATCH THE EXPECTED LAYOUT
                if not header:
                    continue
                header = header.groupdict()
                header["TNSId"] = TNSId
                # DROP HEADER FIELDS DUPLICATED AT THE SPECTRUM LEVEL
                del header["reporters"]
                del header["surveyGroup"]
                del header["survey"]
                if not self.comments:
                    del header['sourceComment']
                else:
                    # COLLAPSE MULTI-LINE COMMENT TO A SINGLE LINE, SWAP DOUBLE
                    # FOR SINGLE QUOTES AND TRUNCATE TO 750 CHARACTERS
                    theseComments = header[
                        "sourceComment"].split("\n")
                    header["sourceComment"] = ""
                    for c in theseComments:
                        header["sourceComment"] += " " + c.strip()
                    header["sourceComment"] = header[
                        "sourceComment"].strip().replace('"', "'")[0:750]

                # ONE MATCH PER SPECTRUM IN THIS REPORT
                spec = re.finditer(
                    r"""<tr class="class-results-.*?-obsdate">(?P<obsdate>[^<]*).*?-tel_inst">(?P<telescope>[^<]*).*?-exptime">(?P<exptime>[^<]*).*?-observer">(?P<sender>[^<]*).*?-reducer">(?P<reducer>[^<]*).*?-source_group_name">(?P<survey>[^<]*).*?-asciifile">(.*?<a href="(?P<filepath>[^"]*)".*?</a>)?.*?-fitsfile">(.*?<a href="(?P<fitsFilepath>[^"]*)".*?</a>)?.*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                    r.group(),
                    flags=0  # re.S
                )
                # ONLY ATTACH THE REPORT'S RELATED FILES TO THE FIRST SPECTRUM
                # THAT REFERENCES THEM
                filesAppended = False
                for s in spec:
                    s = s.groupdict()
                    del s["sender"]
                    del s["surveyGroup"]
                    del s["reducer"]
                    if not self.comments:
                        del s["remarks"]
                    else:
                        s["remarks"] = s["remarks"].replace('"', "'")[0:750]
                    # MERGE THE REPORT-LEVEL METADATA INTO THIS SPECTRUM
                    s.update(header)

                    if s["relatedFiles"] and filesAppended == False:
                        filesAppended = True
                        for f in relatedFiles:
                            # ORDER THE DICTIONARY FOR THIS ROW OF
                            # RESULTS
                            thisFile = collections.OrderedDict()
                            thisFile["TNSId"] = TNSId
                            thisFile["filename"] = f[
                                "filepath"].split("/")[-1]
                            thisFile["url"] = f["filepath"]
                            if self.comments:
                                thisFile["comment"] = f[
                                    "fileComment"].replace("\n", " ").strip()
                            thisFile["dateObs"] = s["obsdate"]
                            # spec1phot2 == 1 MARKS A SPECTRUM-RELATED FILE
                            thisFile["spec1phot2"] = 1
                            relatedFilesTable.append(thisFile)

                    # THE ASCII AND FITS SPECTRUM FILES THEMSELVES (EITHER MAY
                    # BE MISSING)
                    for ffile in [s["filepath"], s["fitsFilepath"]]:
                        if ffile:
                            # ORDER THE DICTIONARY FOR THIS ROW OF
                            # RESULTS
                            thisFile = collections.OrderedDict()
                            thisFile["TNSId"] = TNSId
                            thisFile["filename"] = ffile.split(
                                "/")[-1]
                            thisFile["url"] = ffile
                            if self.comments:
                                thisFile["comment"] = ""
                            thisFile["dateObs"] = s["obsdate"]
                            thisFile["spec1phot2"] = 1
                            relatedFilesTable.append(thisFile)
                    del s["filepath"]
                    del s["fitsFilepath"]
                    del s["relatedFiles"]

                    # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                    orow = collections.OrderedDict()
                    keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
                                "telescope", "exptime", "reportAddedDate", "TNSuser"]
                    for k, v in s.iteritems():
                        if k not in keyOrder:
                            keyOrder.append(k)
                    for k in keyOrder:
                        try:
                            orow[k] = s[k]
                        except:
                            self.log.info(
                                "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                            pass
                    specData.append(orow)

        self.log.info('completed the ``_parse_spectral_data`` method')
        return specData, relatedFilesTable
|
thespacedoctor/transientNamer
|
transientNamer/search.py
|
search.markdown
|
python
|
def markdown(
        self,
        dirPath=None):
    """*Render the results in markdown format*

    **Key Arguments:**
        - ``dirPath`` -- directory to write the rendered files into. If
          ``None`` nothing is written to disk. Default *None*

    **Return:**
        - ``markdownSources`` -- the top-level transient data
        - ``markdownPhot`` -- all photometry associated with the transients
        - ``markdownSpec`` -- all spectral data associated with the transients
        - ``markdownFiles`` -- all files associated with the matched transients found on the tns

    **Usage:**

    .. code-block:: python

        markdownSources, markdownPhot, markdownSpec, markdownFiles = tns.markdown()

    Pass a directory path to also save the four flavours of data (sources,
    photometry, spectra and files) to separate files; rows in every file can
    be associated with their transient via the unique `TNSId`.
    """
    if dirPath:
        # ALL FOUR FILES SHARE A COMMON, SEARCH-SPECIFIC FILENAME PREFIX
        base = dirPath + "/" + self._file_prefix()
        markdownSources = self.sourceResults.markdown(filepath=base + "sources.md")
        markdownPhot = self.photResults.markdown(filepath=base + "phot.md")
        markdownSpec = self.specResults.markdown(filepath=base + "spec.md")
        markdownFiles = self.relatedFilesResults.markdown(filepath=base + "relatedFiles.md")
    else:
        markdownSources = self.sourceResults.markdown()
        markdownPhot = self.photResults.markdown()
        markdownSpec = self.specResults.markdown()
        markdownFiles = self.relatedFilesResults.markdown()
    return markdownSources, markdownPhot, markdownSpec, markdownFiles
|
*Render the results in markdown format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `markdownSources` -- the top-level transient data
- `markdownPhot` -- all photometry associated with the transients
- `markdownSpec` -- all spectral data associated with the transients
- `markdownFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in markdown table format:
.. code-block:: python
markdownSources, markdownPhot, markdownSpec, markdownFiles = tns.markdown()
print markdownSources
.. code-block:: text
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
|:---------|:-----------|:---------------|:------------|:-------------|:--------------|:----------|:---------|:---------------|:----------|:---------|:---------------|:---------------------|:----------------------------------------------|:--------------|:--------------|:------------------|:-----------------------|:----------------------|
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be associated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.markdown("~/tns")
.. image:: https://i.imgur.com/AYLBQoJ.png
:width: 800px
:alt: markdown output
|
train
|
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L432-L487
|
[
"def _file_prefix(\n self):\n \"\"\"*Generate a file prefix based on the type of search for saving files to disk*\n\n **Return:**\n - ``prefix`` -- the file prefix\n \"\"\"\n self.log.info('starting the ``_file_prefix`` method')\n\n if self.ra:\n now = datetime.now()\n prefix = now.strftime(\"%Y%m%dt%H%M%S%f_tns_conesearch_\")\n elif self.name:\n prefix = self.name + \"_tns_conesearch_\"\n elif self.internal_name:\n prefix = self.internal_name + \"_tns_conesearch_\"\n elif self.discInLastDays:\n discInLastDays = str(self.discInLastDays)\n now = datetime.now()\n prefix = now.strftime(\n discInLastDays + \"d_since_%Y%m%d_tns_conesearch_\")\n\n self.log.info('completed the ``_file_prefix`` method')\n return prefix\n"
] |
class search():
"""
*The worker class for the transient namer search module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``ra`` -- RA of the location being checked
- ``dec`` -- DEC of the location being searched
- ``radiusArcsec`` - the radius of the conesearch to perform against the TNS
- ``name`` -- name of the object to search the TNS for
- ``discInLastDays`` -- search the TNS for transient discovered in the last X days
- ``comments`` -- print the comments from the TNS, note these can be long making table outputs somewhat unreadable. Default *False*
**Usage:**
To initiate a search object to search the TNS via an object name (either TNS or survey names accepted):
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
name="Gaia16bbi"
)
or for a conesearch use something similar to:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
ra="06:50:36.74",
dec="+31:06:44.7",
radiusArcsec=5
)
Note the search method can accept coordinates in sexagesimal or decimal degree formats.
To list all new objects discovered in the last three weeks, then use:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
discInLastDays=21
)
"""
    # Initialisation
    def __init__(
            self,
            log,
            ra="",
            dec="",
            radiusArcsec="",
            name="",
            discInLastDays="",
            settings=False,
            comments=False
    ):
        """Set up the search, run the TNS query and cache the result sets.

        See the class docstring for a description of each argument. The TNS
        query is executed here, in the constructor; the parsed results are
        then exposed via the ``sources``/``photometry``/``spectra``/``files``
        properties and the render methods.
        """
        self.log = log
        log.debug("instansiating a new 'search' object")
        self.settings = settings
        self.ra = ra
        self.dec = dec
        self.radiusArcsec = radiusArcsec
        self.comments = comments
        self.name = name
        self.internal_name = ""
        self.discInLastDays = discInLastDays
        # PAGINATION STATE FOR THE TNS QUERY (batchSize RESULTS PER PAGE)
        self.page = 0
        self.batchSize = 1000

        # CREATE THE TIME-RANGE WINDOW TO SEARCH TNS
        # (end IS TOMORROW SO TODAY'S DISCOVERIES ARE INCLUDED)
        if not discInLastDays:
            self.start = ""
            self.end = ""
        else:
            discInLastDays = int(discInLastDays)
            td = timedelta(days=1)
            end = datetime.now() + td
            self.end = end.strftime("%Y-%m-%d")
            td = timedelta(days=discInLastDays)
            start = datetime.now() - td
            self.start = start.strftime("%Y-%m-%d")

        # DETERMINE IF WE HAVE A TNS OR INTERAL SURVEY NAME
        # A TNS NAME LOOKS LIKE "SN 2016asf"/"AT2016asf"/"2016asf"; ANYTHING
        # ELSE IS TREATED AS A SURVEY-INTERNAL NAME
        if self.name:
            matchObject = re.match(r'^((SN|AT) ?)?(\d{4}\w{1,6})', self.name)
            if matchObject:
                self.name = matchObject.group(3)
            else:
                self.internal_name = self.name
                self.name = ""

        # DO THE SEARCH OF THE TNS AND COMPILE THE RESULTS INTO SEPARATE RESULT
        # SETS
        self.sourceResultsList, self.photResultsList, self.specResultsList, self.relatedFilesResultsList = self._query_tns()

        # WRAP EACH RESULT SET IN A list_of_dictionaries RENDERER (PROVIDES
        # csv/json/yaml/table/markdown/mysql OUTPUT)
        self.sourceResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.sourceResultsList
        )
        self.photResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.photResultsList
        )
        self.specResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.specResultsList
        )
        self.relatedFilesResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.relatedFilesResultsList
        )

        return None
@property
def sources(
self):
"""*The results of the search returned as a python list of dictionaries*
**Usage:**
.. code-block:: python
sources = tns.sources
"""
sourceResultsList = []
sourceResultsList[:] = [dict(l) for l in self.sourceResultsList]
return sourceResultsList
@property
def spectra(
self):
"""*The associated source spectral data*
**Usage:**
.. code-block:: python
sourceSpectra = tns.spectra
"""
specResultsList = []
specResultsList[:] = [dict(l) for l in self.specResultsList]
return specResultsList
@property
def files(
self):
"""*The associated source files*
**Usage:**
.. code-block:: python
sourceFiles = tns.files
"""
relatedFilesResultsList = []
relatedFilesResultsList[:] = [dict(l)
for l in self.relatedFilesResultsList]
return relatedFilesResultsList
@property
def photometry(
self):
"""*The associated source photometry*
**Usage:**
.. code-block:: python
sourcePhotometry = tns.photometry
"""
photResultsList = []
photResultsList[:] = [dict(l) for l in self.photResultsList]
return photResultsList
@property
def url(
self):
"""*The generated URL used for searching of the TNS*
**Usage:**
.. code-block:: python
searchURL = tns.url
"""
return self._searchURL
def csv(
self,
dirPath=None):
"""*Render the results in csv format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `csvSources` -- the top-level transient data
- `csvPhot` -- all photometry associated with the transients
- `csvSpec` -- all spectral data associated with the transients
- `csvFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in csv format:
.. code-block:: python
csvSources, csvPhot, csvSpec, csvFiles = tns.csv()
print csvSources
.. code-block:: text
TNSId,TNSName,discoveryName,discSurvey,raSex,decSex,raDeg,decDeg,transRedshift,specType,discMag,discMagFilter,discDate,objectUrl,hostName,hostRedshift,separationArcsec,separationNorthArcsec,separationEastArcsec
2016asf,SN2016asf,ASASSN-16cs,ASAS-SN,06:50:36.73,+31:06:45.36,102.6530,31.1126,0.021,SN Ia,17.1,V-Johnson,2016-03-06 08:09:36,http://wis-tns.weizmann.ac.il/object/2016asf,KUG 0647+311,,0.66,0.65,-0.13
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.csv("~/tns")
.. image:: https://i.imgur.com/BwwqMBg.png
:width: 800px
:alt: csv output
"""
if dirPath:
p = self._file_prefix()
csvSources = self.sourceResults.csv(
filepath=dirPath + "/" + p + "sources.csv")
csvPhot = self.photResults.csv(
filepath=dirPath + "/" + p + "phot.csv")
csvSpec = self.specResults.csv(
filepath=dirPath + "/" + p + "spec.csv")
csvFiles = self.relatedFilesResults.csv(
filepath=dirPath + "/" + p + "relatedFiles.csv")
else:
csvSources = self.sourceResults.csv()
csvPhot = self.photResults.csv()
csvSpec = self.specResults.csv()
csvFiles = self.relatedFilesResults.csv()
return csvSources, csvPhot, csvSpec, csvFiles
def json(
self,
dirPath=None):
"""*Render the results in json format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `jsonSources` -- the top-level transient data
- `jsonPhot` -- all photometry associated with the transients
- `jsonSpec` -- all spectral data associated with the transients
- `jsonFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in json format:
.. code-block:: python
jsonSources, jsonPhot, jsonSpec, jsonFiles = tns.json()
print jsonSources
.. code-block:: text
[
{
"TNSId": "2016asf",
"TNSName": "SN2016asf",
"decDeg": 31.1126,
"decSex": "+31:06:45.36",
"discDate": "2016-03-06 08:09:36",
"discMag": "17.1",
"discMagFilter": "V-Johnson",
"discSurvey": "ASAS-SN",
"discoveryName": "ASASSN-16cs",
"hostName": "KUG 0647+311",
"hostRedshift": null,
"objectUrl": "http://wis-tns.weizmann.ac.il/object/2016asf",
"raDeg": 102.65304166666667,
"raSex": "06:50:36.73",
"separationArcsec": "0.66",
"separationEastArcsec": "-0.13",
"separationNorthArcsec": "0.65",
"specType": "SN Ia",
"transRedshift": "0.021"
}
]
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.json("~/tns")
.. image:: https://i.imgur.com/wAHqARI.png
:width: 800px
:alt: json output
"""
if dirPath:
p = self._file_prefix()
jsonSources = self.sourceResults.json(
filepath=dirPath + "/" + p + "sources.json")
jsonPhot = self.photResults.json(
filepath=dirPath + "/" + p + "phot.json")
jsonSpec = self.specResults.json(
filepath=dirPath + "/" + p + "spec.json")
jsonFiles = self.relatedFilesResults.json(
filepath=dirPath + "/" + p + "relatedFiles.json")
else:
jsonSources = self.sourceResults.json()
jsonPhot = self.photResults.json()
jsonSpec = self.specResults.json()
jsonFiles = self.relatedFilesResults.json()
return jsonSources, jsonPhot, jsonSpec, jsonFiles
def yaml(
self,
dirPath=None):
"""*Render the results in yaml format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `yamlSources` -- the top-level transient data
- `yamlPhot` -- all photometry associated with the transients
- `yamlSpec` -- all spectral data associated with the transients
- `yamlFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in yaml format:
.. code-block:: python
yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml()
print yamlSources
.. code-block:: text
- TNSId: 2016asf
TNSName: SN2016asf
decDeg: 31.1126
decSex: '+31:06:45.36'
discDate: '2016-03-06 08:09:36'
discMag: '17.1'
discMagFilter: V-Johnson
discSurvey: ASAS-SN
discoveryName: ASASSN-16cs
hostName: KUG 0647+311
hostRedshift: null
objectUrl: http://wis-tns.weizmann.ac.il/object/2016asf
raDeg: 102.65304166666667
raSex: '06:50:36.73'
separationArcsec: '0.66'
separationEastArcsec: '-0.13'
separationNorthArcsec: '0.65'
specType: SN Ia
transRedshift: '0.021'
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.yaml("~/tns")
.. image:: https://i.imgur.com/ZpJIC6p.png
:width: 800px
:alt: yaml output
"""
if dirPath:
p = self._file_prefix()
yamlSources = self.sourceResults.yaml(
filepath=dirPath + "/" + p + "sources.yaml")
yamlPhot = self.photResults.yaml(
filepath=dirPath + "/" + p + "phot.yaml")
yamlSpec = self.specResults.yaml(
filepath=dirPath + "/" + p + "spec.yaml")
yamlFiles = self.relatedFilesResults.yaml(
filepath=dirPath + "/" + p + "relatedFiles.yaml")
else:
yamlSources = self.sourceResults.yaml()
yamlPhot = self.photResults.yaml()
yamlSpec = self.specResults.yaml()
yamlFiles = self.relatedFilesResults.yaml()
return yamlSources, yamlPhot, yamlSpec, yamlFiles
def table(
self,
dirPath=None):
"""*Render the results as an ascii table*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `tableSources` -- the top-level transient data
- `tablePhot` -- all photometry associated with the transients
- `tableSpec` -- all spectral data associated with the transients
- `tableFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in ascii table format:
.. code-block:: python
tableSources, tablePhot, tableSpec, tableFiles = tns.table()
print tableSources
.. code-block:: text
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.table("~/tns")
.. image:: https://i.imgur.com/m09M0ho.png
:width: 800px
:alt: ascii files
"""
if dirPath:
p = self._file_prefix()
tableSources = self.sourceResults.table(
filepath=dirPath + "/" + p + "sources.ascii")
tablePhot = self.photResults.table(
filepath=dirPath + "/" + p + "phot.ascii")
tableSpec = self.specResults.table(
filepath=dirPath + "/" + p + "spec.ascii")
tableFiles = self.relatedFilesResults.table(
filepath=dirPath + "/" + p + "relatedFiles.ascii")
else:
tableSources = self.sourceResults.table()
tablePhot = self.photResults.table()
tableSpec = self.specResults.table()
tableFiles = self.relatedFilesResults.table()
return tableSources, tablePhot, tableSpec, tableFiles
    def mysql(
            self,
            tableNamePrefix="TNS",
            dirPath=None):
        """*Render the results as MySQL Insert statements*

        **Key Arguments:**
            - ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
            - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

        **Return:**
            - ``mysqlSources`` -- the top-level transient data
            - ``mysqlPhot`` -- all photometry associated with the transients
            - ``mysqlSpec`` -- all spectral data associated with the transients
            - ``mysqlFiles`` -- all files associated with the matched transients found on the tns

        **Usage:**

        To render the results in mysql insert format:

        .. code-block:: python

            mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")

        Pass ``dirPath`` to also write the statements to four separate
        ``.sql`` files, each prefixed with a matching CREATE TABLE statement;
        all rows can be associated with their transient source using the
        transient's unique `TNSId`.
        """
        if dirPath:
            p = self._file_prefix()

            # NOTE: each template below is filled via ``% locals()``, so the
            # ``tableNamePrefix`` local must keep its name.
            createStatement = """
CREATE TABLE `%(tableNamePrefix)s_sources` (
    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
    `TNSId` varchar(20) NOT NULL,
    `TNSName` varchar(20) DEFAULT NULL,
    `dateCreated` datetime DEFAULT NULL,
    `decDeg` double DEFAULT NULL,
    `decSex` varchar(45) DEFAULT NULL,
    `discDate` datetime DEFAULT NULL,
    `discMag` double DEFAULT NULL,
    `discMagFilter` varchar(45) DEFAULT NULL,
    `discSurvey` varchar(100) DEFAULT NULL,
    `discoveryName` varchar(100) DEFAULT NULL,
    `objectUrl` varchar(200) DEFAULT NULL,
    `raDeg` double DEFAULT NULL,
    `raSex` varchar(45) DEFAULT NULL,
    `specType` varchar(100) DEFAULT NULL,
    `transRedshift` double DEFAULT NULL,
    `updated` tinyint(4) DEFAULT '0',
    `dateLastModified` datetime DEFAULT NULL,
    `hostName` VARCHAR(100) NULL DEFAULT NULL,
    `hostRedshift` DOUBLE NULL DEFAULT NULL,
    `survey` VARCHAR(100) NULL DEFAULT NULL,
    PRIMARY KEY (`primaryId`),
    UNIQUE KEY `tnsid` (`TNSId`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
            mysqlSources = self.sourceResults.mysql(
                tableNamePrefix + "_sources", filepath=dirPath + "/" + p + "sources.sql", createStatement=createStatement)

            createStatement = """
CREATE TABLE `%(tableNamePrefix)s_photometry` (
    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
    `TNSId` varchar(20) NOT NULL,
    `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
    `exptime` double DEFAULT NULL,
    `filter` varchar(100) DEFAULT NULL,
    `limitingMag` tinyint(4) DEFAULT NULL,
    `mag` double DEFAULT NULL,
    `magErr` double DEFAULT NULL,
    `magUnit` varchar(100) DEFAULT NULL,
    `objectName` varchar(100) DEFAULT NULL,
    `obsdate` datetime DEFAULT NULL,
    `reportAddedDate` datetime DEFAULT NULL,
    `suggestedType` varchar(100) DEFAULT NULL,
    `survey` varchar(100) DEFAULT NULL,
    `telescope` varchar(100) DEFAULT NULL,
    `updated` tinyint(4) DEFAULT '0',
    `dateLastModified` datetime DEFAULT NULL,
    `remarks` VARCHAR(800) NULL DEFAULT NULL,
    `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
    PRIMARY KEY (`primaryId`),
    UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
    UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),
    UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
            mysqlPhot = self.photResults.mysql(
                tableNamePrefix + "_photometry", filepath=dirPath + "/" + p + "phot.sql", createStatement=createStatement)

            createStatement = """
CREATE TABLE `%(tableNamePrefix)s_spectra` (
    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
    `TNSId` varchar(45) NOT NULL,
    `TNSuser` varchar(45) DEFAULT NULL,
    `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
    `exptime` double DEFAULT NULL,
    `obsdate` datetime DEFAULT NULL,
    `reportAddedDate` datetime DEFAULT NULL,
    `specType` varchar(100) DEFAULT NULL,
    `survey` varchar(100) DEFAULT NULL,
    `telescope` varchar(100) DEFAULT NULL,
    `transRedshift` double DEFAULT NULL,
    `updated` tinyint(4) DEFAULT '0',
    `dateLastModified` datetime DEFAULT NULL,
    `remarks` VARCHAR(800) NULL DEFAULT NULL,
    `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
    PRIMARY KEY (`primaryId`),
    UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
    UNIQUE KEY `u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
            mysqlSpec = self.specResults.mysql(
                tableNamePrefix + "_spectra", filepath=dirPath + "/" + p + "spec.sql", createStatement=createStatement)

            createStatement = """
CREATE TABLE `%(tableNamePrefix)s_files` (
    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
    `TNSId` varchar(100) NOT NULL,
    `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
    `dateObs` datetime DEFAULT NULL,
    `filename` varchar(200) DEFAULT NULL,
    `spec1phot2` tinyint(4) DEFAULT NULL,
    `url` varchar(800) DEFAULT NULL,
    `updated` tinyint(4) DEFAULT '0',
    `dateLastModified` datetime DEFAULT NULL,
    `comment` VARCHAR(800) NULL DEFAULT NULL,
    PRIMARY KEY (`primaryId`),
    UNIQUE KEY `tnsid_url` (`TNSId`,`url`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
            mysqlFiles = self.relatedFilesResults.mysql(
                tableNamePrefix + "_files", filepath=dirPath + "/" + p + "relatedFiles.sql", createStatement=createStatement)
        else:
            # NO FILE OUTPUT REQUESTED -- JUST RETURN THE INSERT STATEMENTS
            mysqlSources = self.sourceResults.mysql(
                tableNamePrefix + "_sources")
            mysqlPhot = self.photResults.mysql(tableNamePrefix + "_photometry")
            mysqlSpec = self.specResults.mysql(tableNamePrefix + "_spectra")
            mysqlFiles = self.relatedFilesResults.mysql(
                tableNamePrefix + "_files")

        return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles
def _query_tns(self):
    """*Determine how to query the TNS, send the query and parse the results*

    Pages through the TNS search results (``self.batchSize`` rows per
    page) until a short page signals the last one, parsing every row into
    four parallel record sets keyed on the transient's ``TNSId``.

    **Return:**
        - ``sourceTable`` -- one dict of top-level discovery data per transient
        - ``photoTable`` -- photometry dicts for all transients
        - ``specTable`` -- spectral dicts for all transients
        - ``relatedFilesTable`` -- related-file dicts for all transients

    Returns ``None`` if the TNS responds with a non-200 HTTP status code.
    """
    self.log.info('starting the ``get`` method')

    sourceTable = []
    photoTable = []
    specTable = []
    relatedFilesTable = []

    # THIS stop IS TO KEEP TRACK OF THE TNS PAGINATION IF MANY RESULT PAGES
    # ARE RETURNED
    stop = False
    sourceCount = 0
    while not stop:
        status_code, content, self._searchURL = self._get_tns_search_results()

        if status_code != 200:
            self.log.error(
                'cound not get the search reuslts from the TNS, HTML error code %(status_code)s ' % locals())
            return None

        if "No results found" in content:
            print "No results found"
            return sourceTable, photoTable, specTable, relatedFilesTable

        # A PAGE SHORTER THAN THE BATCH SIZE MUST BE THE FINAL PAGE;
        # OTHERWISE ADVANCE TO THE NEXT PAGE (THROTTLED BY 1s TO BE
        # POLITE TO THE TNS SERVER)
        if self._parse_transient_rows(content, True) < self.batchSize:
            stop = True
        else:
            self.page += 1
            thisPage = self.page
            print "Downloaded %(thisPage)s page(s) from the TNS. %(sourceCount)s transients parsed so far." % locals()
            sourceCount += self.batchSize
            print "\t" + self._searchURL
            timesleep.sleep(1)

        # PARSE ALL ROWS RETURNED
        for transientRow in self._parse_transient_rows(content):
            # TOP LEVEL DISCOVERY CONTENT
            sourceContent = transientRow.group()
            discInfo, TNSId = self._parse_discovery_information(
                sourceContent)
            sourceTable.append(discInfo)

            # PHOTOMETERY
            phot, relatedFiles = self._parse_photometry_data(
                sourceContent, TNSId)
            photoTable += phot
            relatedFilesTable += relatedFiles

            # SPECTRA
            spec, relatedFiles = self._parse_spectral_data(
                sourceContent, TNSId)
            specTable += spec
            relatedFilesTable += relatedFiles

    # SORT BY SEPARATION FROM THE SEARCH COORDINATES
    # (only conesearch results carry a `separationArcsec` key, hence the
    # permissive try/except around the sort)
    try:
        sourceTable = sorted(sourceTable, key=itemgetter(
            'separationArcsec'), reverse=False)
    except:
        pass

    self.log.info('completed the ``get`` method')
    return sourceTable, photoTable, specTable, relatedFilesTable
def _get_tns_search_results(
        self):
    """*Query the TNS search form and return the raw HTTP response*

    Builds the request from the search criteria gathered at
    instantiation (coordinates, name, discovery-date window ...) and
    requests a single results page (``self.page``) of up to
    ``self.batchSize`` rows.

    **Return:**
        - ``status_code`` -- the HTTP status code of the response
        - ``content`` -- the raw body of the response
        - ``url`` -- the fully-resolved search URL that was queried

    **Raises:**
        - ``requests.exceptions.RequestException`` -- if the HTTP request
          itself fails (connection error, timeout ...)
    """
    self.log.info('starting the ``_get_tns_search_results`` method')

    try:
        response = requests.get(
            url="http://wis-tns.weizmann.ac.il/search",
            params={
                "page": self.page,
                "ra": self.ra,
                "decl": self.dec,
                "radius": self.radiusArcsec,
                "name": self.name,
                "internal_name": self.internal_name,
                "date_start[date]": self.start,
                "date_end[date]": self.end,
                "num_page": self.batchSize,
                "display[redshift]": "1",
                "display[hostname]": "1",
                "display[host_redshift]": "1",
                "display[source_group_name]": "1",
                "display[internal_name]": "1",
                "display[spectra_count]": "1",
                "display[discoverymag]": "1",
                "display[discmagfilter]": "1",
                "display[discoverydate]": "1",
                "display[discoverer]": "1",
                "display[sources]": "1",
                "display[bibcode]": "1",
            },
        )
    except requests.exceptions.RequestException:
        # BUG FIX: this branch previously only printed a warning and then
        # fell through to reference the unbound `response` variable,
        # masking the real network error with a NameError. Re-raise so the
        # caller sees the underlying failure.
        print('HTTP Request failed')
        self.log.error('HTTP Request failed')
        raise

    self.log.info('completed the ``_get_tns_search_results`` method')
    return response.status_code, response.content, response.url
def _file_prefix(
self):
"""*Generate a file prefix based on the type of search for saving files to disk*
**Return:**
- ``prefix`` -- the file prefix
"""
self.log.info('starting the ``_file_prefix`` method')
if self.ra:
now = datetime.now()
prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_conesearch_")
elif self.name:
prefix = self.name + "_tns_conesearch_"
elif self.internal_name:
prefix = self.internal_name + "_tns_conesearch_"
elif self.discInLastDays:
discInLastDays = str(self.discInLastDays)
now = datetime.now()
prefix = now.strftime(
discInLastDays + "d_since_%Y%m%d_tns_conesearch_")
self.log.info('completed the ``_file_prefix`` method')
return prefix
def _parse_transient_rows(
self,
content,
count=False):
"""* parse transient rows from the TNS result page content*
**Key Arguments:**
- ``content`` -- the content from the TNS results page.
- ``count`` -- return only the number of rows
**Return:**
- ``transientRows``
"""
self.log.info('starting the ``_parse_transient_rows`` method')
regexForRow = r"""\n([^\n]*?<a href="/object/.*?)(?=\n[^\n]*?<a href="/object/|<\!\-\- /\.section, /#content \-\->)"""
if count:
# A SINGLE SOURCE BLOCK
matchedSources = re.findall(
regexForRow,
content,
flags=re.S # re.S
)
return len(matchedSources)
# A SINGLE SOURCE BLOCK
matchedSources = re.finditer(
regexForRow,
content,
flags=re.S # re.S
)
self.log.info('completed the ``_parse_transient_rows`` method')
return matchedSources
def _parse_discovery_information(
        self,
        content):
    """*Parse the top-level discovery information from one row of the TNS results page*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page.

    **Return:**
        - ``discoveryData`` -- an ordered dictionary of discovery data for this transient
        - ``TNSId`` -- the unique TNS id for the transient
    """
    self.log.info('starting the ``_parse_discovery_information`` method')

    # ASTROCALC UNIT CONVERTER OBJECT
    converter = unit_conversion(
        log=self.log
    )

    matches = re.finditer(
        r"""<tr class="row-.*?"><td class="cell-id">(?P<tnsId>\d*?)</td><td class="cell-name"><a href="(?P<objectUrl>.*?)">(?P<TNSName>.*?)</a></td><td class="cell-.*?<td class="cell-ra">(?P<raSex>.*?)</td><td class="cell-decl">(?P<decSex>.*?)</td><td class="cell-ot_name">(?P<specType>.*?)</td><td class="cell-redshift">(?P<transRedshift>.*?)</td><td class="cell-hostname">(?P<hostName>.*?)</td><td class="cell-host_redshift">(?P<hostRedshift>.*?)</td><td class="cell-source_group_name">(?P<discSurvey>.*?)</td>.*?<td class="cell-internal_name">(<a.*?>)?(?P<discoveryName>.*?)(</a>)?</td>.*?<td class="cell-discoverymag">(?P<discMag>.*?)</td><td class="cell-disc_filter_name">(?P<discMagFilter>.*?)</td><td class="cell-discoverydate">(?P<discDate>.*?)</td><td class="cell-discoverer">(?P<sender>.*?)</td>.*?</tr>""",
        content,
        flags=0  # re.S
    )

    discoveryData = []
    for match in matches:
        row = match.groupdict()
        # TIDY VALUES: STRIP WHITESPACE AND NULL-OUT EMPTY STRINGS
        for k, v in row.iteritems():
            row[k] = v.strip()
            if len(v) == 0:
                row[k] = None
        # NOTE(review): values are strings at this point so this numeric
        # comparison never fires -- kept as-is for backwards compatibility
        if row["transRedshift"] == 0:
            row["transRedshift"] = None
        # TNS NAMES STARTING WITH A YEAR GET THE "SN" PREFIX
        if row["TNSName"][0] in ["1", "2"]:
            row["TNSName"] = "SN" + row["TNSName"]
        row["objectUrl"] = "http://wis-tns.weizmann.ac.il" + \
            row["objectUrl"]

        # CONVERT COORDINATES TO DECIMAL DEGREES
        row["raDeg"] = converter.ra_sexegesimal_to_decimal(
            ra=row["raSex"]
        )
        row["decDeg"] = converter.dec_sexegesimal_to_decimal(
            dec=row["decSex"]
        )

        # IF THIS IS A COORDINATE SEARCH, ADD SEPARATION FROM
        # ORIGINAL QUERY COORDINATES
        if self.ra:
            # CALCULATE SEPARATION IN ARCSEC
            from astrocalc.coords import separations
            calculator = separations(
                log=self.log,
                ra1=self.ra,
                dec1=self.dec,
                ra2=row["raDeg"],
                dec2=row["decDeg"],
            )
            angularSeparation, north, east = calculator.get()
            row["separationArcsec"] = angularSeparation
            row["separationNorthArcsec"] = north
            row["separationEastArcsec"] = east

        # FALL BACK ON THE SENDER AS THE SURVEY NAME IF NO DISCOVERY
        # SURVEY WAS REPORTED
        if not row["discSurvey"]:
            row["survey"] = row["sender"]
        del row["sender"]
        del row["tnsId"]

        row["TNSName"] = row["TNSName"].replace(" ", "")
        row["TNSId"] = row["TNSName"].replace(
            "SN", "").replace("AT", "")
        TNSId = row["TNSId"]

        # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
        orow = collections.OrderedDict()
        keyOrder = ["TNSId", "TNSName", "discoveryName", "discSurvey", "raSex", "decSex", "raDeg", "decDeg",
                    "transRedshift", "specType", "discMag", "discMagFilter", "discDate", "objectUrl", "hostName", "hostRedshift", "separationArcsec", "separationNorthArcsec", "separationEastArcsec"]
        for k, v in row.iteritems():
            if k not in keyOrder:
                keyOrder.append(k)
        for k in keyOrder:
            try:
                orow[k] = row[k]
            except:
                self.log.info(
                    "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                pass
        # BUG FIX: append the ordered copy (`orow`) rather than the raw
        # `row` -- previously the ordering work above was discarded,
        # unlike the sibling photometry/spectra parsers which append
        # their ordered dictionaries.
        discoveryData.append(orow)

    self.log.info('completed the ``_parse_discovery_information`` method')
    return discoveryData[0], TNSId
def _parse_photometry_data(
        self,
        content,
        TNSId):
    """*Parse photometry data from a row in the TNS results content*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page
        - ``TNSId`` -- the tns id of the transient

    **Return:**
        - ``photData`` -- a list of ordered dictionaries of the photometry data
        - ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files
    """
    self.log.info('starting the ``_parse_photometry_data`` method')

    photData = []
    relatedFilesTable = []

    # AT REPORT BLOCK: EVERYTHING FROM THE "AT reportings" HEADER UP TO
    # THE "Classification reportings" HEADER (OR END OF CONTENT)
    ATBlock = re.search(
        r"""<tr class=[^\n]*?AT reportings.*?(?=<tr class=[^\n]*?Classification reportings|$)""",
        content,
        flags=re.S  # re.S
    )

    if ATBlock:
        ATBlock = ATBlock.group()
        # ONE MATCH PER AT REPORT WITHIN THE BLOCK
        reports = re.finditer(
            r"""<tr class="row-[^"]*"><td class="cell-id">.*?</table>""",
            ATBlock,
            flags=re.S  # re.S
        )
        # FILES ATTACHED ANYWHERE IN THIS AT BLOCK
        relatedFiles = self._parse_related_files(ATBlock)

        for r in reports:
            # REPORT-LEVEL (HEADER) METADATA SHARED BY EVERY PHOTOMETRY
            # POINT IN THIS REPORT
            header = re.search(
                r"""<tr class="row[^"]*".*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<sender>[^<]*).*?reporter_name">(?P<reporters>[^<]*).*?source_group_name">(?P<surveyGroup>[^<]*).*?ra">(?P<ra>[^<]*).*?decl">(?P<dec>[^<]*).*?discovery_date">(?P<obsDate>[^<]*).*?flux">(?P<mag>[^<]*).*?filter_name">(?P<magFilter>[^<]*).*?related_files">(?P<relatedFiles>[^<]*).*?type_name">(?P<suggestedType>[^<]*).*?hostname">(?P<hostName>[^<]*).*?host_redshift">(?P<hostRedshift>[^<]*).*?internal_name">(?P<objectName>[^<]*).*?groups">(?P<survey>[^<]*).*?remarks">(?P<sourceComment>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )
            # NOTE(review): if the header regex fails to match, `header`
            # is None, the AttributeError below is swallowed and the raw
            # report is printed -- but `header["TNSId"]` will then raise.
            # Presumably the regex always matches in practice; confirm.
            try:
                header = header.groupdict()
            except:
                print r.group()
            header["TNSId"] = TNSId
            # DROP REPORT-LEVEL FIELDS NOT WANTED IN THE PHOTOMETRY ROWS
            del header["reporters"]
            del header["surveyGroup"]
            del header["hostName"]
            del header["hostRedshift"]
            del header["mag"]
            del header["magFilter"]
            del header["obsDate"]
            del header["ra"]
            del header["dec"]
            if not self.comments:
                del header['sourceComment']
            else:
                # FLATTEN MULTI-LINE COMMENTS TO A SINGLE LINE, SWAP
                # DOUBLE QUOTES FOR SINGLE AND TRUNCATE TO 750 CHARS
                theseComments = header[
                    "sourceComment"].split("\n")
                header["sourceComment"] = ""
                for c in theseComments:
                    header["sourceComment"] += " " + c.strip()
                header["sourceComment"] = header[
                    "sourceComment"].strip().replace('"', "'")[0:750]

            # INDIVIDUAL PHOTOMETRY POINTS WITHIN THIS REPORT
            phot = re.finditer(
                r"""<tr class="row\-[^"]*".*?obsdate">(?P<obsdate>[^<]*).*?flux">(?P<mag>[^<]*).*?fluxerr">(?P<magErr>[^<]*).*?limflux">(?P<limitingMag>[^<]*).*?unit_name">(?P<magUnit>[^<]*).*?filter_name">(?P<filter>[^<]*).*?tel_inst">(?P<telescope>[^<]*).*?exptime">(?P<exptime>[^<]*).*?observer">(?P<observer>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )
            # ONLY ATTACH THE RELATED FILES TO THE FIRST PHOTOMETRY POINT
            # OF THE REPORT SO THEY ARE NOT DUPLICATED
            filesAppended = False
            for p in phot:
                p = p.groupdict()
                del p["observer"]
                # A LIMITING-MAG (NON-DETECTION) ROW HAS `limflux` SET AND
                # `flux` EMPTY; FLAG IT WITH limitingMag = 1
                if p["limitingMag"] and not p["mag"]:
                    p["mag"] = p["limitingMag"]
                    p["limitingMag"] = 1
                    p["remarks"] = p["remarks"].replace(
                        "[Last non detection]", "")
                else:
                    p["limitingMag"] = 0
                if not self.comments:
                    del p["remarks"]
                p.update(header)
                if p["relatedFiles"] and filesAppended == False:
                    filesAppended = True
                    for f in relatedFiles:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = f[
                            "filepath"].split("/")[-1]
                        thisFile["url"] = f["filepath"]
                        if self.comments:
                            thisFile["comment"] = f[
                                "fileComment"].replace("\n", " ").strip().replace('"', "'")[0:750]
                        thisFile["dateObs"] = p["obsdate"]
                        # spec1phot2 = 2 MARKS THE FILE AS PHOTOMETRY-RELATED
                        thisFile["spec1phot2"] = 2
                        relatedFilesTable.append(thisFile)
                # FALL BACK ON THE SENDER AS THE SURVEY NAME
                if not p["survey"] and not p["objectName"]:
                    p["survey"] = p["sender"]
                del p["relatedFiles"]
                del p["sender"]

                # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                orow = collections.OrderedDict()
                keyOrder = ["TNSId", "survey", "obsdate", "filter", "limitingMag", "mag", "magErr",
                            "magUnit", "suggestedType", "telescope", "exptime", "reportAddedDate"]
                for k, v in p.iteritems():
                    if k not in keyOrder:
                        keyOrder.append(k)
                for k in keyOrder:
                    try:
                        orow[k] = p[k]
                    except:
                        self.log.info(
                            "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                        pass
                photData.append(orow)

    self.log.info('completed the ``_parse_photometry_data`` method')
    return photData, relatedFilesTable
def _parse_related_files(
self,
content):
"""*parse the contents for related files URLs and comments*
**Key Arguments:**
- ``content`` -- the content to parse.
**Return:**
- ``relatedFiles`` -- a list of dictionaries of transient related files
"""
self.log.info('starting the ``_parse_related_files`` method')
relatedFilesList = re.finditer(
r"""<td class="cell-filename">.*?href="(?P<filepath>[^"]*).*?remarks">(?P<fileComment>[^<]*)""",
content,
flags=0 # re.S
)
relatedFiles = []
for f in relatedFilesList:
f = f.groupdict()
relatedFiles.append(f)
self.log.info('completed the ``_parse_related_files`` method')
return relatedFiles
def _parse_spectral_data(
        self,
        content,
        TNSId):
    """*Parse spectra data from a row in the TNS results content*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page
        - ``TNSId`` -- the tns id of the transient

    **Return:**
        - ``specData`` -- a list of ordered dictionaries of the spectral data
        - ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files
    """
    self.log.info('starting the ``_parse_spectral_data`` method')

    specData = []
    relatedFilesTable = []

    # CLASSIFICATION BLOCK: EVERYTHING FROM THE "Classification
    # reportings" HEADER TO THE END OF THE CONTENT
    classBlock = re.search(
        r"""<tr class=[^\n]*?Classification reportings.*$""",
        content,
        flags=re.S  # re.S
    )

    if classBlock:
        classBlock = classBlock.group()
        # ONE MATCH PER CLASSIFICATION REPORT WITHIN THE BLOCK
        reports = re.finditer(
            r"""<tr class="row-[^"]*"><td class="cell-id">.*?</tbody>\s*</table>\s*</div></td> </tr>\s*</tbody>\s*</table>\s*</div></td> </tr>""",
            classBlock,
            flags=re.S  #
        )
        # FILES ATTACHED ANYWHERE IN THIS CLASSIFICATION BLOCK
        relatedFiles = self._parse_related_files(classBlock)

        for r in reports:
            # REPORT-LEVEL (HEADER) METADATA SHARED BY EVERY SPECTRUM IN
            # THIS REPORT
            header = re.search(
                r"""<tr class="row.*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<TNSuser>[^<]*).*?classifier_name">(?P<reporters>[^<]*).*?source_group_name">(?P<survey>[^<]*).*?-type">(?P<specType>[^<]*).*?-redshift">(?P<transRedshift>[^<]*).*?-related_files">(?P<relatedFiles>[^<]*).*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<sourceComment>[^<]*)</td>""",
                r.group(),
                flags=re.S  # re.S
            )
            # SKIP REPORTS WHOSE HEADER CANNOT BE PARSED
            if not header:
                continue
            header = header.groupdict()
            header["TNSId"] = TNSId
            # DROP REPORT-LEVEL FIELDS NOT WANTED IN THE SPECTRUM ROWS
            del header["reporters"]
            del header["surveyGroup"]
            del header["survey"]
            if not self.comments:
                del header['sourceComment']
            else:
                # FLATTEN MULTI-LINE COMMENTS TO A SINGLE LINE, SWAP
                # DOUBLE QUOTES FOR SINGLE AND TRUNCATE TO 750 CHARS
                theseComments = header[
                    "sourceComment"].split("\n")
                header["sourceComment"] = ""
                for c in theseComments:
                    header["sourceComment"] += " " + c.strip()
                header["sourceComment"] = header[
                    "sourceComment"].strip().replace('"', "'")[0:750]

            # INDIVIDUAL SPECTRA WITHIN THIS REPORT
            spec = re.finditer(
                r"""<tr class="class-results-.*?-obsdate">(?P<obsdate>[^<]*).*?-tel_inst">(?P<telescope>[^<]*).*?-exptime">(?P<exptime>[^<]*).*?-observer">(?P<sender>[^<]*).*?-reducer">(?P<reducer>[^<]*).*?-source_group_name">(?P<survey>[^<]*).*?-asciifile">(.*?<a href="(?P<filepath>[^"]*)".*?</a>)?.*?-fitsfile">(.*?<a href="(?P<fitsFilepath>[^"]*)".*?</a>)?.*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )
            # ONLY ATTACH THE REPORT'S RELATED FILES TO THE FIRST SPECTRUM
            # SO THEY ARE NOT DUPLICATED
            filesAppended = False
            for s in spec:
                s = s.groupdict()
                del s["sender"]
                del s["surveyGroup"]
                del s["reducer"]
                if not self.comments:
                    del s["remarks"]
                else:
                    s["remarks"] = s["remarks"].replace('"', "'")[0:750]
                s.update(header)
                if s["relatedFiles"] and filesAppended == False:
                    filesAppended = True
                    for f in relatedFiles:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = f[
                            "filepath"].split("/")[-1]
                        thisFile["url"] = f["filepath"]
                        if self.comments:
                            thisFile["comment"] = f[
                                "fileComment"].replace("\n", " ").strip()
                        thisFile["dateObs"] = s["obsdate"]
                        # spec1phot2 = 1 MARKS THE FILE AS SPECTRUM-RELATED
                        thisFile["spec1phot2"] = 1
                        relatedFilesTable.append(thisFile)
                # THE ASCII AND FITS SPECTRUM FILES THEMSELVES ALSO GO
                # INTO THE RELATED-FILES TABLE
                for ffile in [s["filepath"], s["fitsFilepath"]]:
                    if ffile:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = ffile.split(
                            "/")[-1]
                        thisFile["url"] = ffile
                        if self.comments:
                            thisFile["comment"] = ""
                        thisFile["dateObs"] = s["obsdate"]
                        thisFile["spec1phot2"] = 1
                        relatedFilesTable.append(thisFile)
                del s["filepath"]
                del s["fitsFilepath"]
                del s["relatedFiles"]

                # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                orow = collections.OrderedDict()
                keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
                            "telescope", "exptime", "reportAddedDate", "TNSuser"]
                for k, v in s.iteritems():
                    if k not in keyOrder:
                        keyOrder.append(k)
                for k in keyOrder:
                    try:
                        orow[k] = s[k]
                    except:
                        self.log.info(
                            "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                        pass
                specData.append(orow)

    self.log.info('completed the ``_parse_spectral_data`` method')
    return specData, relatedFilesTable
|
thespacedoctor/transientNamer
|
transientNamer/search.py
|
search.table
|
python
|
def table(
self,
dirPath=None):
if dirPath:
p = self._file_prefix()
tableSources = self.sourceResults.table(
filepath=dirPath + "/" + p + "sources.ascii")
tablePhot = self.photResults.table(
filepath=dirPath + "/" + p + "phot.ascii")
tableSpec = self.specResults.table(
filepath=dirPath + "/" + p + "spec.ascii")
tableFiles = self.relatedFilesResults.table(
filepath=dirPath + "/" + p + "relatedFiles.ascii")
else:
tableSources = self.sourceResults.table()
tablePhot = self.photResults.table()
tableSpec = self.specResults.table()
tableFiles = self.relatedFilesResults.table()
return tableSources, tablePhot, tableSpec, tableFiles
|
*Render the results as an ascii table*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `tableSources` -- the top-level transient data
- `tablePhot` -- all photometry associated with the transients
- `tableSpec` -- all spectral data associated with the transients
- `tableFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in ascii table format:
.. code-block:: python
tableSources, tablePhot, tableSpec, tableFiles = tns.table()
print tableSources
.. code-block:: text
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.table("~/tns")
.. image:: https://i.imgur.com/m09M0ho.png
:width: 800px
:alt: ascii files
|
train
|
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L489-L546
|
[
"def _file_prefix(\n self):\n \"\"\"*Generate a file prefix based on the type of search for saving files to disk*\n\n **Return:**\n - ``prefix`` -- the file prefix\n \"\"\"\n self.log.info('starting the ``_file_prefix`` method')\n\n if self.ra:\n now = datetime.now()\n prefix = now.strftime(\"%Y%m%dt%H%M%S%f_tns_conesearch_\")\n elif self.name:\n prefix = self.name + \"_tns_conesearch_\"\n elif self.internal_name:\n prefix = self.internal_name + \"_tns_conesearch_\"\n elif self.discInLastDays:\n discInLastDays = str(self.discInLastDays)\n now = datetime.now()\n prefix = now.strftime(\n discInLastDays + \"d_since_%Y%m%d_tns_conesearch_\")\n\n self.log.info('completed the ``_file_prefix`` method')\n return prefix\n"
] |
class search():
"""
*The worker class for the transient namer search module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``ra`` -- RA of the location being checked
- ``dec`` -- DEC of the location being searched
- ``radiusArcsec`` - the radius of the conesearch to perform against the TNS
- ``name`` -- name of the object to search the TNS for
- ``discInLastDays`` -- search the TNS for transient discovered in the last X days
- ``comments`` -- print the comments from the TNS, note these can be long making table outputs somewhat unreadable. Default *False*
**Usage:**
To initiate a search object to search the TNS via an object name (either TNS or survey names accepted):
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
name="Gaia16bbi"
)
or for a conesearch use something similar to:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
ra="06:50:36.74",
dec="+31:06:44.7",
radiusArcsec=5
)
Note the search method can accept coordinates in sexagesimal or decimal defree formats.
To list all new objects discovered in the last three weeks, then use:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
discInLastDays=21
)
"""
# Initialisation
def __init__(
        self,
        log,
        ra="",
        dec="",
        radiusArcsec="",
        name="",
        discInLastDays="",
        settings=False,
        comments=False
):
    """*Initialise the search object and immediately run the TNS query*

    See the class docstring for the meaning of each argument. Note the
    constructor performs the network query itself; the four
    ``*ResultsList`` / ``*Results`` attribute pairs are populated before
    it returns.
    """
    self.log = log
    log.debug("instansiating a new 'search' object")
    self.settings = settings
    self.ra = ra
    self.dec = dec
    self.radiusArcsec = radiusArcsec
    self.comments = comments
    self.name = name
    self.internal_name = ""
    self.discInLastDays = discInLastDays
    # PAGINATION STATE: PAGE INDEX AND ROWS REQUESTED PER PAGE
    self.page = 0
    self.batchSize = 1000

    # CREATE THE TIME-RANGE WINDOW TO SEARCH TNS
    # (end is tomorrow so that today's discoveries are included)
    if not discInLastDays:
        self.start = ""
        self.end = ""
    else:
        discInLastDays = int(discInLastDays)
        td = timedelta(days=1)
        end = datetime.now() + td
        self.end = end.strftime("%Y-%m-%d")
        td = timedelta(days=discInLastDays)
        start = datetime.now() - td
        self.start = start.strftime("%Y-%m-%d")

    # DETERMINE IF WE HAVE A TNS OR INTERAL SURVEY NAME
    # (TNS names look like "2016asf", optionally prefixed "SN"/"AT";
    # anything else is treated as a survey-internal name)
    if self.name:
        matchObject = re.match(r'^((SN|AT) ?)?(\d{4}\w{1,6})', self.name)

        if matchObject:
            self.name = matchObject.group(3)
        else:
            self.internal_name = self.name
            self.name = ""

    # DO THE SEARCH OF THE TNS AND COMPILE THE RESULTS INTO SEPARATE RESULT
    # SETS
    self.sourceResultsList, self.photResultsList, self.specResultsList, self.relatedFilesResultsList = self._query_tns()

    self.sourceResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.sourceResultsList
    )
    self.photResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.photResultsList
    )
    self.specResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.specResultsList
    )
    self.relatedFilesResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.relatedFilesResultsList
    )

    return None
@property
def sources(
self):
"""*The results of the search returned as a python list of dictionaries*
**Usage:**
.. code-block:: python
sources = tns.sources
"""
sourceResultsList = []
sourceResultsList[:] = [dict(l) for l in self.sourceResultsList]
return sourceResultsList
@property
def spectra(
self):
"""*The associated source spectral data*
**Usage:**
.. code-block:: python
sourceSpectra = tns.spectra
"""
specResultsList = []
specResultsList[:] = [dict(l) for l in self.specResultsList]
return specResultsList
@property
def files(
self):
"""*The associated source files*
**Usage:**
.. code-block:: python
sourceFiles = tns.files
"""
relatedFilesResultsList = []
relatedFilesResultsList[:] = [dict(l)
for l in self.relatedFilesResultsList]
return relatedFilesResultsList
@property
def photometry(
self):
"""*The associated source photometry*
**Usage:**
.. code-block:: python
sourcePhotometry = tns.photometry
"""
photResultsList = []
photResultsList[:] = [dict(l) for l in self.photResultsList]
return photResultsList
@property
def url(
self):
"""*The generated URL used for searching of the TNS*
**Usage:**
.. code-block:: python
searchURL = tns.url
"""
return self._searchURL
def csv(
self,
dirPath=None):
"""*Render the results in csv format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `csvSources` -- the top-level transient data
- `csvPhot` -- all photometry associated with the transients
- `csvSpec` -- all spectral data associated with the transients
- `csvFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in csv format:
.. code-block:: python
csvSources, csvPhot, csvSpec, csvFiles = tns.csv()
print csvSources
.. code-block:: text
TNSId,TNSName,discoveryName,discSurvey,raSex,decSex,raDeg,decDeg,transRedshift,specType,discMag,discMagFilter,discDate,objectUrl,hostName,hostRedshift,separationArcsec,separationNorthArcsec,separationEastArcsec
2016asf,SN2016asf,ASASSN-16cs,ASAS-SN,06:50:36.73,+31:06:45.36,102.6530,31.1126,0.021,SN Ia,17.1,V-Johnson,2016-03-06 08:09:36,http://wis-tns.weizmann.ac.il/object/2016asf,KUG 0647+311,,0.66,0.65,-0.13
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.csv("~/tns")
.. image:: https://i.imgur.com/BwwqMBg.png
:width: 800px
:alt: csv output
"""
if dirPath:
p = self._file_prefix()
csvSources = self.sourceResults.csv(
filepath=dirPath + "/" + p + "sources.csv")
csvPhot = self.photResults.csv(
filepath=dirPath + "/" + p + "phot.csv")
csvSpec = self.specResults.csv(
filepath=dirPath + "/" + p + "spec.csv")
csvFiles = self.relatedFilesResults.csv(
filepath=dirPath + "/" + p + "relatedFiles.csv")
else:
csvSources = self.sourceResults.csv()
csvPhot = self.photResults.csv()
csvSpec = self.specResults.csv()
csvFiles = self.relatedFilesResults.csv()
return csvSources, csvPhot, csvSpec, csvFiles
def json(
self,
dirPath=None):
"""*Render the results in json format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `jsonSources` -- the top-level transient data
- `jsonPhot` -- all photometry associated with the transients
- `jsonSpec` -- all spectral data associated with the transients
- `jsonFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in json format:
.. code-block:: python
jsonSources, jsonPhot, jsonSpec, jsonFiles = tns.json()
print jsonSources
.. code-block:: text
[
{
"TNSId": "2016asf",
"TNSName": "SN2016asf",
"decDeg": 31.1126,
"decSex": "+31:06:45.36",
"discDate": "2016-03-06 08:09:36",
"discMag": "17.1",
"discMagFilter": "V-Johnson",
"discSurvey": "ASAS-SN",
"discoveryName": "ASASSN-16cs",
"hostName": "KUG 0647+311",
"hostRedshift": null,
"objectUrl": "http://wis-tns.weizmann.ac.il/object/2016asf",
"raDeg": 102.65304166666667,
"raSex": "06:50:36.73",
"separationArcsec": "0.66",
"separationEastArcsec": "-0.13",
"separationNorthArcsec": "0.65",
"specType": "SN Ia",
"transRedshift": "0.021"
}
]
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.json("~/tns")
.. image:: https://i.imgur.com/wAHqARI.png
:width: 800px
:alt: json output
"""
if dirPath:
p = self._file_prefix()
jsonSources = self.sourceResults.json(
filepath=dirPath + "/" + p + "sources.json")
jsonPhot = self.photResults.json(
filepath=dirPath + "/" + p + "phot.json")
jsonSpec = self.specResults.json(
filepath=dirPath + "/" + p + "spec.json")
jsonFiles = self.relatedFilesResults.json(
filepath=dirPath + "/" + p + "relatedFiles.json")
else:
jsonSources = self.sourceResults.json()
jsonPhot = self.photResults.json()
jsonSpec = self.specResults.json()
jsonFiles = self.relatedFilesResults.json()
return jsonSources, jsonPhot, jsonSpec, jsonFiles
def yaml(
self,
dirPath=None):
"""*Render the results in yaml format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `yamlSources` -- the top-level transient data
- `yamlPhot` -- all photometry associated with the transients
- `yamlSpec` -- all spectral data associated with the transients
- `yamlFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in yaml format:
.. code-block:: python
yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml()
print yamlSources
.. code-block:: text
- TNSId: 2016asf
TNSName: SN2016asf
decDeg: 31.1126
decSex: '+31:06:45.36'
discDate: '2016-03-06 08:09:36'
discMag: '17.1'
discMagFilter: V-Johnson
discSurvey: ASAS-SN
discoveryName: ASASSN-16cs
hostName: KUG 0647+311
hostRedshift: null
objectUrl: http://wis-tns.weizmann.ac.il/object/2016asf
raDeg: 102.65304166666667
raSex: '06:50:36.73'
separationArcsec: '0.66'
separationEastArcsec: '-0.13'
separationNorthArcsec: '0.65'
specType: SN Ia
transRedshift: '0.021'
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.yaml("~/tns")
.. image:: https://i.imgur.com/ZpJIC6p.png
:width: 800px
:alt: yaml output
"""
if dirPath:
p = self._file_prefix()
yamlSources = self.sourceResults.yaml(
filepath=dirPath + "/" + p + "sources.yaml")
yamlPhot = self.photResults.yaml(
filepath=dirPath + "/" + p + "phot.yaml")
yamlSpec = self.specResults.yaml(
filepath=dirPath + "/" + p + "spec.yaml")
yamlFiles = self.relatedFilesResults.yaml(
filepath=dirPath + "/" + p + "relatedFiles.yaml")
else:
yamlSources = self.sourceResults.yaml()
yamlPhot = self.photResults.yaml()
yamlSpec = self.specResults.yaml()
yamlFiles = self.relatedFilesResults.yaml()
return yamlSources, yamlPhot, yamlSpec, yamlFiles
def markdown(
self,
dirPath=None):
"""*Render the results in markdown format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `markdownSources` -- the top-level transient data
- `markdownPhot` -- all photometry associated with the transients
- `markdownSpec` -- all spectral data associated with the transients
- `markdownFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in markdown table format:
.. code-block:: python
markdownSources, markdownPhot, markdownSpec, markdownFiles = tns.markdown()
print markdownSources
.. code-block:: text
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
|:---------|:-----------|:---------------|:------------|:-------------|:--------------|:----------|:---------|:---------------|:----------|:---------|:---------------|:---------------------|:----------------------------------------------|:--------------|:--------------|:------------------|:-----------------------|:----------------------|
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.markdown("~/tns")
.. image:: https://i.imgur.com/AYLBQoJ.png
:width: 800px
:alt: markdown output
"""
if dirPath:
p = self._file_prefix()
markdownSources = self.sourceResults.markdown(
filepath=dirPath + "/" + p + "sources.md")
markdownPhot = self.photResults.markdown(
filepath=dirPath + "/" + p + "phot.md")
markdownSpec = self.specResults.markdown(
filepath=dirPath + "/" + p + "spec.md")
markdownFiles = self.relatedFilesResults.markdown(
filepath=dirPath + "/" + p + "relatedFiles.md")
else:
markdownSources = self.sourceResults.markdown()
markdownPhot = self.photResults.markdown()
markdownSpec = self.specResults.markdown()
markdownFiles = self.relatedFilesResults.markdown()
return markdownSources, markdownPhot, markdownSpec, markdownFiles
def mysql(
        self,
        tableNamePrefix="TNS",
        dirPath=None):
    """*Render the results as MySQL Insert statements*

    **Key Arguments:**
        - ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `mysqlSources` -- the top-level transient data
        - `mysqlPhot` -- all photometry associated with the transients
        - `mysqlSpec` -- all spectral data associated with the transients
        - `mysqlFiles` -- all files associated with the matched transients found on the tns

    **Usage:**

        To render the results in mysql insert format:

        .. code-block:: python

            mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")
            print mysqlSources

        You can save the results to file by passing in a directory path within
        which to save the files to. The four flavours of data (sources,
        photometry, spectra and files) are saved to separate files but all
        data can be associated with its transient source using the transient's
        unique `TNSId`.

        .. code-block:: python

            tns.mysql("TNS", "~/tns")
    """
    if dirPath:
        # WRITING TO FILE: EACH FLAVOUR OF DATA GETS ITS OWN .sql FILE WITH A
        # CREATE TABLE STATEMENT PREPENDED. ALL FILES SHARE A COMMON
        # SEARCH-SPECIFIC FILENAME PREFIX.
        p = self._file_prefix()
        # NOTE: the `%(tableNamePrefix)s` placeholders in every
        # createStatement below are filled in via the `% locals()` string
        # substitution immediately after each literal.
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_sources` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(20) NOT NULL,
  `TNSName` varchar(20) DEFAULT NULL,
  `dateCreated` datetime DEFAULT NULL,
  `decDeg` double DEFAULT NULL,
  `decSex` varchar(45) DEFAULT NULL,
  `discDate` datetime DEFAULT NULL,
  `discMag` double DEFAULT NULL,
  `discMagFilter` varchar(45) DEFAULT NULL,
  `discSurvey` varchar(100) DEFAULT NULL,
  `discoveryName` varchar(100) DEFAULT NULL,
  `objectUrl` varchar(200) DEFAULT NULL,
  `raDeg` double DEFAULT NULL,
  `raSex` varchar(45) DEFAULT NULL,
  `specType` varchar(100) DEFAULT NULL,
  `transRedshift` double DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `hostName` VARCHAR(100) NULL DEFAULT NULL,
  `hostRedshift` DOUBLE NULL DEFAULT NULL,
  `survey` VARCHAR(100) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid` (`TNSId`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources", filepath=dirPath + "/" + p + "sources.sql", createStatement=createStatement)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_photometry` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(20) NOT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `exptime` double DEFAULT NULL,
  `filter` varchar(100) DEFAULT NULL,
  `limitingMag` tinyint(4) DEFAULT NULL,
  `mag` double DEFAULT NULL,
  `magErr` double DEFAULT NULL,
  `magUnit` varchar(100) DEFAULT NULL,
  `objectName` varchar(100) DEFAULT NULL,
  `obsdate` datetime DEFAULT NULL,
  `reportAddedDate` datetime DEFAULT NULL,
  `suggestedType` varchar(100) DEFAULT NULL,
  `survey` varchar(100) DEFAULT NULL,
  `telescope` varchar(100) DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `remarks` VARCHAR(800) NULL DEFAULT NULL,
  `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
  UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),
  UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlPhot = self.photResults.mysql(
            tableNamePrefix + "_photometry", filepath=dirPath + "/" + p + "phot.sql", createStatement=createStatement)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_spectra` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(45) NOT NULL,
  `TNSuser` varchar(45) DEFAULT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `exptime` double DEFAULT NULL,
  `obsdate` datetime DEFAULT NULL,
  `reportAddedDate` datetime DEFAULT NULL,
  `specType` varchar(100) DEFAULT NULL,
  `survey` varchar(100) DEFAULT NULL,
  `telescope` varchar(100) DEFAULT NULL,
  `transRedshift` double DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `remarks` VARCHAR(800) NULL DEFAULT NULL,
  `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
  UNIQUE KEY `u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlSpec = self.specResults.mysql(
            tableNamePrefix + "_spectra", filepath=dirPath + "/" + p + "spec.sql", createStatement=createStatement)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_files` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(100) NOT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `dateObs` datetime DEFAULT NULL,
  `filename` varchar(200) DEFAULT NULL,
  `spec1phot2` tinyint(4) DEFAULT NULL,
  `url` varchar(800) DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `comment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid_url` (`TNSId`,`url`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files", filepath=dirPath + "/" + p + "relatedFiles.sql", createStatement=createStatement)
    else:
        # NO OUTPUT DIRECTORY GIVEN - RENDER THE INSERT STATEMENTS IN MEMORY
        # ONLY (NO CREATE TABLE STATEMENTS ARE GENERATED IN THIS BRANCH)
        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources")
        mysqlPhot = self.photResults.mysql(tableNamePrefix + "_photometry")
        mysqlSpec = self.specResults.mysql(tableNamePrefix + "_spectra")
        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files")

    return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles
def _query_tns(self):
"""
*determine how to query the TNS, send query and parse the results*
**Return:**
- ``results`` -- a list of dictionaries (one dictionary for each result set returned from the TNS)
"""
self.log.info('starting the ``get`` method')
sourceTable = []
photoTable = []
specTable = []
relatedFilesTable = []
# THIS stop IS TO KEEP TRACK OF THE TNS PAGINATION IF MANY RESULT PAGES
# ARE RETURNED
stop = False
sourceCount = 0
while not stop:
status_code, content, self._searchURL = self._get_tns_search_results()
if status_code != 200:
self.log.error(
'cound not get the search reuslts from the TNS, HTML error code %(status_code)s ' % locals())
return None
if "No results found" in content:
print "No results found"
return sourceTable, photoTable, specTable, relatedFilesTable
if self._parse_transient_rows(content, True) < self.batchSize:
stop = True
else:
self.page += 1
thisPage = self.page
print "Downloaded %(thisPage)s page(s) from the TNS. %(sourceCount)s transients parsed so far." % locals()
sourceCount += self.batchSize
print "\t" + self._searchURL
timesleep.sleep(1)
# PARSE ALL ROWS RETURNED
for transientRow in self._parse_transient_rows(content):
# TOP LEVEL DISCOVERY CONTENT
sourceContent = transientRow.group()
discInfo, TNSId = self._parse_discovery_information(
sourceContent)
sourceTable.append(discInfo)
# PHOTOMETERY
phot, relatedFiles = self._parse_photometry_data(
sourceContent, TNSId)
photoTable += phot
relatedFilesTable += relatedFiles
# SPECTRA
spec, relatedFiles = self._parse_spectral_data(
sourceContent, TNSId)
specTable += spec
relatedFilesTable += relatedFiles
# SORT BY SEPARATION FROM THE SEARCH COORDINATES
try:
sourceTable = sorted(sourceTable, key=itemgetter(
'separationArcsec'), reverse=False)
except:
pass
self.log.info('completed the ``get`` method')
return sourceTable, photoTable, specTable, relatedFilesTable
def _get_tns_search_results(
        self):
    """
    *query the tns and return the response*

    **Return:**
        - ``status_code`` -- the HTTP status code of the response (``None`` if the request could not be sent)
        - ``content`` -- the body of the response (empty string on failure)
        - ``url`` -- the fully-resolved search URL (empty string on failure)
    """
    self.log.info('starting the ``_get_tns_search_results`` method')

    try:
        # THE search ENDPOINT TAKES THE QUERY CONSTRAINTS PLUS A SET OF
        # display FLAGS SWITCHING ON THE EXTRA RESULT COLUMNS WE PARSE
        response = requests.get(
            url="http://wis-tns.weizmann.ac.il/search",
            params={
                "page": self.page,
                "ra": self.ra,
                "decl": self.dec,
                "radius": self.radiusArcsec,
                "name": self.name,
                "internal_name": self.internal_name,
                "date_start[date]": self.start,
                "date_end[date]": self.end,
                "num_page": self.batchSize,
                "display[redshift]": "1",
                "display[hostname]": "1",
                "display[host_redshift]": "1",
                "display[source_group_name]": "1",
                "display[internal_name]": "1",
                "display[spectra_count]": "1",
                "display[discoverymag]": "1",
                "display[discmagfilter]": "1",
                "display[discoverydate]": "1",
                "display[discoverer]": "1",
                "display[sources]": "1",
                "display[bibcode]": "1",
            },
        )
    except requests.exceptions.RequestException:
        # BUG FIX: previously `response` was still referenced after this
        # handler, raising a NameError. Fail gracefully instead - the
        # caller's `status_code != 200` check aborts the search cleanly.
        print('HTTP Request failed')
        return None, "", ""

    self.log.info('completed the ``_get_tns_search_results`` method')
    return response.status_code, response.content, response.url
def _file_prefix(
self):
"""*Generate a file prefix based on the type of search for saving files to disk*
**Return:**
- ``prefix`` -- the file prefix
"""
self.log.info('starting the ``_file_prefix`` method')
if self.ra:
now = datetime.now()
prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_conesearch_")
elif self.name:
prefix = self.name + "_tns_conesearch_"
elif self.internal_name:
prefix = self.internal_name + "_tns_conesearch_"
elif self.discInLastDays:
discInLastDays = str(self.discInLastDays)
now = datetime.now()
prefix = now.strftime(
discInLastDays + "d_since_%Y%m%d_tns_conesearch_")
self.log.info('completed the ``_file_prefix`` method')
return prefix
def _parse_transient_rows(
self,
content,
count=False):
"""* parse transient rows from the TNS result page content*
**Key Arguments:**
- ``content`` -- the content from the TNS results page.
- ``count`` -- return only the number of rows
**Return:**
- ``transientRows``
"""
self.log.info('starting the ``_parse_transient_rows`` method')
regexForRow = r"""\n([^\n]*?<a href="/object/.*?)(?=\n[^\n]*?<a href="/object/|<\!\-\- /\.section, /#content \-\->)"""
if count:
# A SINGLE SOURCE BLOCK
matchedSources = re.findall(
regexForRow,
content,
flags=re.S # re.S
)
return len(matchedSources)
# A SINGLE SOURCE BLOCK
matchedSources = re.finditer(
regexForRow,
content,
flags=re.S # re.S
)
self.log.info('completed the ``_parse_transient_rows`` method')
return matchedSources
def _parse_discovery_information(
        self,
        content):
    """* parse discovery information from one row on the TNS results page*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page.

    **Return:**
        - ``discoveryData`` -- an ordered dictionary of the discovery data for the (first) source row
        - ``TNSId`` -- the unique TNS id for the transient
    """
    self.log.info('starting the ``_parse_discovery_information`` method')

    # ASTROCALC UNIT CONVERTER OBJECT
    converter = unit_conversion(
        log=self.log
    )

    # ONE MATCH PER SOURCE ROW; THE NAMED GROUPS MAP STRAIGHT TO RESULT KEYS
    matches = re.finditer(
        r"""<tr class="row-.*?"><td class="cell-id">(?P<tnsId>\d*?)</td><td class="cell-name"><a href="(?P<objectUrl>.*?)">(?P<TNSName>.*?)</a></td><td class="cell-.*?<td class="cell-ra">(?P<raSex>.*?)</td><td class="cell-decl">(?P<decSex>.*?)</td><td class="cell-ot_name">(?P<specType>.*?)</td><td class="cell-redshift">(?P<transRedshift>.*?)</td><td class="cell-hostname">(?P<hostName>.*?)</td><td class="cell-host_redshift">(?P<hostRedshift>.*?)</td><td class="cell-source_group_name">(?P<discSurvey>.*?)</td>.*?<td class="cell-internal_name">(<a.*?>)?(?P<discoveryName>.*?)(</a>)?</td>.*?<td class="cell-discoverymag">(?P<discMag>.*?)</td><td class="cell-disc_filter_name">(?P<discMagFilter>.*?)</td><td class="cell-discoverydate">(?P<discDate>.*?)</td><td class="cell-discoverer">(?P<sender>.*?)</td>.*?</tr>""",
        content,
        flags=0  # re.S
    )

    discoveryData = []
    for match in matches:
        row = match.groupdict()
        # TIDY VALUES: STRIP WHITESPACE AND NULL-OUT EMPTY STRINGS
        for k, v in row.iteritems():
            row[k] = v.strip()
            if len(v) == 0:
                row[k] = None
        # NOTE(review): all values here are strings, so this comparison with
        # the integer 0 can never be true -- presumably meant to null-out a
        # zero redshift; confirm and compare against "0" if so
        if row["transRedshift"] == 0:
            row["transRedshift"] = None
        if row["TNSName"][0] in ["1", "2"]:
            # NAMES STARTING WITH A YEAR ("2016...") GET THE "SN" PREFIX
            row["TNSName"] = "SN" + row["TNSName"]
        row["objectUrl"] = "http://wis-tns.weizmann.ac.il" + \
            row["objectUrl"]

        # CONVERT COORDINATES TO DECIMAL DEGREES
        row["raDeg"] = converter.ra_sexegesimal_to_decimal(
            ra=row["raSex"]
        )
        row["decDeg"] = converter.dec_sexegesimal_to_decimal(
            dec=row["decSex"]
        )

        # IF THIS IS A COORDINATE SEARCH, ADD SEPARATION FROM
        # ORIGINAL QUERY COORDINATES
        if self.ra:
            # CALCULATE SEPARATION IN ARCSEC
            from astrocalc.coords import separations
            calculator = separations(
                log=self.log,
                ra1=self.ra,
                dec1=self.dec,
                ra2=row["raDeg"],
                dec2=row["decDeg"],
            )
            angularSeparation, north, east = calculator.get()
            row["separationArcsec"] = angularSeparation
            row["separationNorthArcsec"] = north
            row["separationEastArcsec"] = east

        if not row["discSurvey"]:
            # FALL BACK TO THE REPORTING USER AS THE SURVEY NAME
            row["survey"] = row["sender"]
        del row["sender"]
        del row["tnsId"]

        row["TNSName"] = row["TNSName"].replace(" ", "")
        row["TNSId"] = row["TNSName"].replace(
            "SN", "").replace("AT", "")
        TNSId = row["TNSId"]

        # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
        orow = collections.OrderedDict()
        keyOrder = ["TNSId", "TNSName", "discoveryName", "discSurvey", "raSex", "decSex", "raDeg", "decDeg",
                    "transRedshift", "specType", "discMag", "discMagFilter", "discDate", "objectUrl", "hostName", "hostRedshift", "separationArcsec", "separationNorthArcsec", "separationEastArcsec"]
        for k, v in row.iteritems():
            if k not in keyOrder:
                keyOrder.append(k)
        for k in keyOrder:
            try:
                orow[k] = row[k]
            except:
                self.log.info(
                    "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                pass
        # BUG FIX: APPEND THE ORDERED DICTIONARY. PREVIOUSLY THE UNORDERED
        # `row` WAS APPENDED, DISCARDING THE COLUMN ORDERING BUILT ABOVE AND
        # MAKING THIS METHOD INCONSISTENT WITH THE PHOTOMETRY AND SPECTRA
        # PARSERS (WHICH BOTH APPEND THEIR `orow`).
        discoveryData.append(orow)

    self.log.info('completed the ``_parse_discovery_information`` method')
    # ONLY THE FIRST ROW IS RETURNED - `content` IS EXPECTED TO HOLD A
    # SINGLE SOURCE ROW (raises IndexError/NameError if nothing matched)
    return discoveryData[0], TNSId
def _parse_photometry_data(
        self,
        content,
        TNSId):
    """*parse photometry data from a row in the tns results content*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page
        - ``TNSId`` -- the tns id of the transient

    **Return:**
        - ``photData`` -- a list of ordered dictionaries of the photometry data
        - ``relatedFilesTable`` -- a list of ordered dictionaries of transient photometry related files
    """
    self.log.info('starting the ``_parse_photometry_data`` method')

    photData = []
    relatedFilesTable = []

    # AT REPORT BLOCK: EVERYTHING FROM THE "AT reportings" HEADER UP TO THE
    # "Classification reportings" HEADER (OR END OF CONTENT)
    ATBlock = re.search(
        r"""<tr class=[^\n]*?AT reportings.*?(?=<tr class=[^\n]*?Classification reportings|$)""",
        content,
        flags=re.S  # re.S
    )

    if ATBlock:
        ATBlock = ATBlock.group()
        # ONE MATCH PER INDIVIDUAL AT REPORT WITHIN THE BLOCK
        reports = re.finditer(
            r"""<tr class="row-[^"]*"><td class="cell-id">.*?</table>""",
            ATBlock,
            flags=re.S  # re.S
        )
        # FILES ASSOCIATED WITH THE WHOLE AT BLOCK (APPENDED ONCE PER REPORT
        # BELOW, GUARDED BY `filesAppended`)
        relatedFiles = self._parse_related_files(ATBlock)

        for r in reports:
            # REPORT-LEVEL METADATA SHARED BY EVERY PHOTOMETRY POINT IN THIS
            # REPORT
            header = re.search(
                r"""<tr class="row[^"]*".*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<sender>[^<]*).*?reporter_name">(?P<reporters>[^<]*).*?source_group_name">(?P<surveyGroup>[^<]*).*?ra">(?P<ra>[^<]*).*?decl">(?P<dec>[^<]*).*?discovery_date">(?P<obsDate>[^<]*).*?flux">(?P<mag>[^<]*).*?filter_name">(?P<magFilter>[^<]*).*?related_files">(?P<relatedFiles>[^<]*).*?type_name">(?P<suggestedType>[^<]*).*?hostname">(?P<hostName>[^<]*).*?host_redshift">(?P<hostRedshift>[^<]*).*?internal_name">(?P<objectName>[^<]*).*?groups">(?P<survey>[^<]*).*?remarks">(?P<sourceComment>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )
            try:
                header = header.groupdict()
            except:
                # REGEX FAILED TO MATCH - DUMP THE RAW REPORT FOR DEBUGGING.
                # NOTE(review): `header` stays None here, so the next line
                # will still raise - presumably never hit in practice; confirm
                print r.group()
            header["TNSId"] = TNSId
            # DISCARD REPORT-LEVEL FIELDS NOT WANTED IN THE PER-POINT ROWS
            del header["reporters"]
            del header["surveyGroup"]
            del header["hostName"]
            del header["hostRedshift"]
            del header["mag"]
            del header["magFilter"]
            del header["obsDate"]
            del header["ra"]
            del header["dec"]
            if not self.comments:
                del header['sourceComment']
            else:
                # FLATTEN MULTI-LINE COMMENTS TO ONE LINE, SWAP DOUBLE QUOTES
                # FOR SINGLE AND TRUNCATE TO 750 CHARACTERS
                theseComments = header[
                    "sourceComment"].split("\n")
                header["sourceComment"] = ""
                for c in theseComments:
                    header["sourceComment"] += " " + c.strip()
                header["sourceComment"] = header[
                    "sourceComment"].strip().replace('"', "'")[0:750]

            # INDIVIDUAL PHOTOMETRY POINTS WITHIN THIS REPORT
            phot = re.finditer(
                r"""<tr class="row\-[^"]*".*?obsdate">(?P<obsdate>[^<]*).*?flux">(?P<mag>[^<]*).*?fluxerr">(?P<magErr>[^<]*).*?limflux">(?P<limitingMag>[^<]*).*?unit_name">(?P<magUnit>[^<]*).*?filter_name">(?P<filter>[^<]*).*?tel_inst">(?P<telescope>[^<]*).*?exptime">(?P<exptime>[^<]*).*?observer">(?P<observer>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )
            # GUARD SO THE BLOCK-LEVEL RELATED FILES ARE APPENDED AT MOST
            # ONCE PER REPORT
            filesAppended = False
            for p in phot:
                p = p.groupdict()
                del p["observer"]
                if p["limitingMag"] and not p["mag"]:
                    # NON-DETECTION: RECORD THE LIMIT AS THE MAG AND FLAG IT
                    p["mag"] = p["limitingMag"]
                    p["limitingMag"] = 1
                    p["remarks"] = p["remarks"].replace(
                        "[Last non detection]", "")
                else:
                    p["limitingMag"] = 0
                if not self.comments:
                    del p["remarks"]
                # MERGE THE REPORT-LEVEL METADATA INTO THIS POINT
                p.update(header)
                if p["relatedFiles"] and filesAppended == False:
                    filesAppended = True
                    for f in relatedFiles:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = f[
                            "filepath"].split("/")[-1]
                        thisFile["url"] = f["filepath"]
                        if self.comments:
                            thisFile["comment"] = f[
                                "fileComment"].replace("\n", " ").strip().replace('"', "'")[0:750]
                        thisFile["dateObs"] = p["obsdate"]
                        # 2 FLAGS A PHOTOMETRY-ASSOCIATED FILE (1 = SPECTRUM)
                        thisFile["spec1phot2"] = 2
                        relatedFilesTable.append(thisFile)

                if not p["survey"] and not p["objectName"]:
                    # FALL BACK TO THE REPORTING USER AS THE SURVEY NAME
                    p["survey"] = p["sender"]
                del p["relatedFiles"]
                del p["sender"]

                # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                orow = collections.OrderedDict()
                keyOrder = ["TNSId", "survey", "obsdate", "filter", "limitingMag", "mag", "magErr",
                            "magUnit", "suggestedType", "telescope", "exptime", "reportAddedDate"]
                for k, v in p.iteritems():
                    if k not in keyOrder:
                        keyOrder.append(k)
                for k in keyOrder:
                    try:
                        orow[k] = p[k]
                    except:
                        self.log.info(
                            "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                        pass
                photData.append(orow)

    self.log.info('completed the ``_parse_photometry_data`` method')
    return photData, relatedFilesTable
def _parse_related_files(
self,
content):
"""*parse the contents for related files URLs and comments*
**Key Arguments:**
- ``content`` -- the content to parse.
**Return:**
- ``relatedFiles`` -- a list of dictionaries of transient related files
"""
self.log.info('starting the ``_parse_related_files`` method')
relatedFilesList = re.finditer(
r"""<td class="cell-filename">.*?href="(?P<filepath>[^"]*).*?remarks">(?P<fileComment>[^<]*)""",
content,
flags=0 # re.S
)
relatedFiles = []
for f in relatedFilesList:
f = f.groupdict()
relatedFiles.append(f)
self.log.info('completed the ``_parse_related_files`` method')
return relatedFiles
def _parse_spectral_data(
        self,
        content,
        TNSId):
    """*parse spectra data from a row in the tns results content*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page
        - ``TNSId`` -- the tns id of the transient

    **Return:**
        - ``specData`` -- a list of ordered dictionaries of the spectral data
        - ``relatedFilesTable`` -- a list of ordered dictionaries of transient spectrum related files
    """
    self.log.info('starting the ``_parse_spectral_data`` method')

    specData = []
    relatedFilesTable = []

    # CLASSIFICATION BLOCK: EVERYTHING FROM THE "Classification reportings"
    # HEADER TO THE END OF THE CONTENT
    classBlock = re.search(
        r"""<tr class=[^\n]*?Classification reportings.*$""",
        content,
        flags=re.S  # re.S
    )

    if classBlock:
        classBlock = classBlock.group()
        # ONE MATCH PER INDIVIDUAL CLASSIFICATION REPORT WITHIN THE BLOCK
        reports = re.finditer(
            r"""<tr class="row-[^"]*"><td class="cell-id">.*?</tbody>\s*</table>\s*</div></td> </tr>\s*</tbody>\s*</table>\s*</div></td> </tr>""",
            classBlock,
            flags=re.S  #
        )
        # FILES ASSOCIATED WITH THE WHOLE CLASSIFICATION BLOCK (APPENDED AT
        # MOST ONCE PER REPORT, GUARDED BY `filesAppended` BELOW)
        relatedFiles = self._parse_related_files(classBlock)

        for r in reports:
            # REPORT-LEVEL METADATA SHARED BY EVERY SPECTRUM IN THIS REPORT
            header = re.search(
                r"""<tr class="row.*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<TNSuser>[^<]*).*?classifier_name">(?P<reporters>[^<]*).*?source_group_name">(?P<survey>[^<]*).*?-type">(?P<specType>[^<]*).*?-redshift">(?P<transRedshift>[^<]*).*?-related_files">(?P<relatedFiles>[^<]*).*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<sourceComment>[^<]*)</td>""",
                r.group(),
                flags=re.S  # re.S
            )
            if not header:
                # MALFORMED/UNEXPECTED REPORT MARKUP - SKIP IT
                continue
            header = header.groupdict()
            header["TNSId"] = TNSId
            # DISCARD REPORT-LEVEL FIELDS NOT WANTED IN THE PER-SPECTRUM ROWS
            del header["reporters"]
            del header["surveyGroup"]
            del header["survey"]
            if not self.comments:
                del header['sourceComment']
            else:
                # FLATTEN MULTI-LINE COMMENTS TO ONE LINE, SWAP DOUBLE QUOTES
                # FOR SINGLE AND TRUNCATE TO 750 CHARACTERS
                theseComments = header[
                    "sourceComment"].split("\n")
                header["sourceComment"] = ""
                for c in theseComments:
                    header["sourceComment"] += " " + c.strip()
                header["sourceComment"] = header[
                    "sourceComment"].strip().replace('"', "'")[0:750]

            # INDIVIDUAL SPECTRA WITHIN THIS REPORT (ASCII AND FITS FILE
            # LINKS ARE OPTIONAL GROUPS)
            spec = re.finditer(
                r"""<tr class="class-results-.*?-obsdate">(?P<obsdate>[^<]*).*?-tel_inst">(?P<telescope>[^<]*).*?-exptime">(?P<exptime>[^<]*).*?-observer">(?P<sender>[^<]*).*?-reducer">(?P<reducer>[^<]*).*?-source_group_name">(?P<survey>[^<]*).*?-asciifile">(.*?<a href="(?P<filepath>[^"]*)".*?</a>)?.*?-fitsfile">(.*?<a href="(?P<fitsFilepath>[^"]*)".*?</a>)?.*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )
            # GUARD SO THE BLOCK-LEVEL RELATED FILES ARE APPENDED AT MOST
            # ONCE PER REPORT
            filesAppended = False
            for s in spec:
                s = s.groupdict()
                del s["sender"]
                del s["surveyGroup"]
                del s["reducer"]
                if not self.comments:
                    del s["remarks"]
                else:
                    # SANITISE QUOTES AND TRUNCATE TO 750 CHARACTERS
                    s["remarks"] = s["remarks"].replace('"', "'")[0:750]
                # MERGE THE REPORT-LEVEL METADATA INTO THIS SPECTRUM
                s.update(header)

                if s["relatedFiles"] and filesAppended == False:
                    filesAppended = True
                    for f in relatedFiles:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = f[
                            "filepath"].split("/")[-1]
                        thisFile["url"] = f["filepath"]
                        if self.comments:
                            thisFile["comment"] = f[
                                "fileComment"].replace("\n", " ").strip()
                        thisFile["dateObs"] = s["obsdate"]
                        # 1 FLAGS A SPECTRUM-ASSOCIATED FILE (2 = PHOTOMETRY)
                        thisFile["spec1phot2"] = 1
                        relatedFilesTable.append(thisFile)

                # THE ASCII AND FITS SPECTRUM FILES THEMSELVES ALSO GET A ROW
                # EACH IN THE RELATED-FILES TABLE
                for ffile in [s["filepath"], s["fitsFilepath"]]:
                    if ffile:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = ffile.split(
                            "/")[-1]
                        thisFile["url"] = ffile
                        if self.comments:
                            thisFile["comment"] = ""
                        thisFile["dateObs"] = s["obsdate"]
                        thisFile["spec1phot2"] = 1
                        relatedFilesTable.append(thisFile)
                del s["filepath"]
                del s["fitsFilepath"]
                del s["relatedFiles"]

                # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                orow = collections.OrderedDict()
                keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
                            "telescope", "exptime", "reportAddedDate", "TNSuser"]
                for k, v in s.iteritems():
                    if k not in keyOrder:
                        keyOrder.append(k)
                for k in keyOrder:
                    try:
                        orow[k] = s[k]
                    except:
                        self.log.info(
                            "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                        pass
                specData.append(orow)

    self.log.info('completed the ``_parse_spectral_data`` method')
    return specData, relatedFilesTable
|
thespacedoctor/transientNamer
|
transientNamer/search.py
|
search.mysql
|
python
|
def mysql(
self,
tableNamePrefix="TNS",
dirPath=None):
if dirPath:
p = self._file_prefix()
createStatement = """
CREATE TABLE `%(tableNamePrefix)s_sources` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`TNSId` varchar(20) NOT NULL,
`TNSName` varchar(20) DEFAULT NULL,
`dateCreated` datetime DEFAULT NULL,
`decDeg` double DEFAULT NULL,
`decSex` varchar(45) DEFAULT NULL,
`discDate` datetime DEFAULT NULL,
`discMag` double DEFAULT NULL,
`discMagFilter` varchar(45) DEFAULT NULL,
`discSurvey` varchar(100) DEFAULT NULL,
`discoveryName` varchar(100) DEFAULT NULL,
`objectUrl` varchar(200) DEFAULT NULL,
`raDeg` double DEFAULT NULL,
`raSex` varchar(45) DEFAULT NULL,
`specType` varchar(100) DEFAULT NULL,
`transRedshift` double DEFAULT NULL,
`updated` tinyint(4) DEFAULT '0',
`dateLastModified` datetime DEFAULT NULL,
`hostName` VARCHAR(100) NULL DEFAULT NULL,
`hostRedshift` DOUBLE NULL DEFAULT NULL,
`survey` VARCHAR(100) NULL DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `tnsid` (`TNSId`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
mysqlSources = self.sourceResults.mysql(
tableNamePrefix + "_sources", filepath=dirPath + "/" + p + "sources.sql", createStatement=createStatement)
createStatement = """
CREATE TABLE `%(tableNamePrefix)s_photometry` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`TNSId` varchar(20) NOT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`exptime` double DEFAULT NULL,
`filter` varchar(100) DEFAULT NULL,
`limitingMag` tinyint(4) DEFAULT NULL,
`mag` double DEFAULT NULL,
`magErr` double DEFAULT NULL,
`magUnit` varchar(100) DEFAULT NULL,
`objectName` varchar(100) DEFAULT NULL,
`obsdate` datetime DEFAULT NULL,
`reportAddedDate` datetime DEFAULT NULL,
`suggestedType` varchar(100) DEFAULT NULL,
`survey` varchar(100) DEFAULT NULL,
`telescope` varchar(100) DEFAULT NULL,
`updated` tinyint(4) DEFAULT '0',
`dateLastModified` datetime DEFAULT NULL,
`remarks` VARCHAR(800) NULL DEFAULT NULL,
`sourceComment` VARCHAR(800) NULL DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),
UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
mysqlPhot = self.photResults.mysql(
tableNamePrefix + "_photometry", filepath=dirPath + "/" + p + "phot.sql", createStatement=createStatement)
createStatement = """
CREATE TABLE `%(tableNamePrefix)s_spectra` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`TNSId` varchar(45) NOT NULL,
`TNSuser` varchar(45) DEFAULT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`exptime` double DEFAULT NULL,
`obsdate` datetime DEFAULT NULL,
`reportAddedDate` datetime DEFAULT NULL,
`specType` varchar(100) DEFAULT NULL,
`survey` varchar(100) DEFAULT NULL,
`telescope` varchar(100) DEFAULT NULL,
`transRedshift` double DEFAULT NULL,
`updated` tinyint(4) DEFAULT '0',
`dateLastModified` datetime DEFAULT NULL,
`remarks` VARCHAR(800) NULL DEFAULT NULL,
`sourceComment` VARCHAR(800) NULL DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
UNIQUE KEY `u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
mysqlSpec = self.specResults.mysql(
tableNamePrefix + "_spectra", filepath=dirPath + "/" + p + "spec.sql", createStatement=createStatement)
createStatement = """
CREATE TABLE `%(tableNamePrefix)s_files` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`TNSId` varchar(100) NOT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`dateObs` datetime DEFAULT NULL,
`filename` varchar(200) DEFAULT NULL,
`spec1phot2` tinyint(4) DEFAULT NULL,
`url` varchar(800) DEFAULT NULL,
`updated` tinyint(4) DEFAULT '0',
`dateLastModified` datetime DEFAULT NULL,
`comment` VARCHAR(800) NULL DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `tnsid_url` (`TNSId`,`url`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
mysqlFiles = self.relatedFilesResults.mysql(
tableNamePrefix + "_files", filepath=dirPath + "/" + p + "relatedFiles.sql", createStatement=createStatement)
else:
mysqlSources = self.sourceResults.mysql(
tableNamePrefix + "_sources")
mysqlPhot = self.photResults.mysql(tableNamePrefix + "_photometry")
mysqlSpec = self.specResults.mysql(tableNamePrefix + "_spectra")
mysqlFiles = self.relatedFilesResults.mysql(
tableNamePrefix + "_files")
return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles
|
*Render the results as MySQL Insert statements*
**Key Arguments:**
- ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `mysqlSources` -- the top-level transient data
- `mysqlPhot` -- all photometry associated with the transients
- `mysqlSpec` -- all spectral data associated with the transients
- `mysqlFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in mysql insert format:
.. code-block:: python
mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")
print mysqlSources
.. code-block:: text
INSERT INTO `TNS_sources` (TNSId,TNSName,dateCreated,decDeg,decSex,discDate,discMag,discMagFilter,discSurvey,discoveryName,hostName,hostRedshift,objectUrl,raDeg,raSex,separationArcsec,separationEastArcsec,separationNorthArcsec,specType,transRedshift) VALUES ("2016asf" ,"SN2016asf" ,"2016-09-20T11:22:13" ,"31.1126" ,"+31:06:45.36" ,"2016-03-06 08:09:36" ,"17.1" ,"V-Johnson" ,"ASAS-SN" ,"ASASSN-16cs" ,"KUG 0647+311" ,null ,"http://wis-tns.weizmann.ac.il/object/2016asf" ,"102.653041667" ,"06:50:36.73" ,"0.66" ,"-0.13" ,"0.65" ,"SN Ia" ,"0.021") ON DUPLICATE KEY UPDATE TNSId="2016asf", TNSName="SN2016asf", dateCreated="2016-09-20T11:22:13", decDeg="31.1126", decSex="+31:06:45.36", discDate="2016-03-06 08:09:36", discMag="17.1", discMagFilter="V-Johnson", discSurvey="ASAS-SN", discoveryName="ASASSN-16cs", hostName="KUG 0647+311", hostRedshift=null, objectUrl="http://wis-tns.weizmann.ac.il/object/2016asf", raDeg="102.653041667", raSex="06:50:36.73", separationArcsec="0.66", separationEastArcsec="-0.13", separationNorthArcsec="0.65", specType="SN Ia", transRedshift="0.021", updated=1, dateLastModified=NOW() ;
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.mysql("TNS", "~/tns")
.. image:: https://i.imgur.com/CozySPW.png
:width: 800px
:alt: mysql output
|
train
|
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L548-L704
|
[
"def _file_prefix(\n self):\n \"\"\"*Generate a file prefix based on the type of search for saving files to disk*\n\n **Return:**\n - ``prefix`` -- the file prefix\n \"\"\"\n self.log.info('starting the ``_file_prefix`` method')\n\n if self.ra:\n now = datetime.now()\n prefix = now.strftime(\"%Y%m%dt%H%M%S%f_tns_conesearch_\")\n elif self.name:\n prefix = self.name + \"_tns_conesearch_\"\n elif self.internal_name:\n prefix = self.internal_name + \"_tns_conesearch_\"\n elif self.discInLastDays:\n discInLastDays = str(self.discInLastDays)\n now = datetime.now()\n prefix = now.strftime(\n discInLastDays + \"d_since_%Y%m%d_tns_conesearch_\")\n\n self.log.info('completed the ``_file_prefix`` method')\n return prefix\n"
] |
class search():
"""
*The worker class for the transient namer search module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``ra`` -- RA of the location being checked
- ``dec`` -- DEC of the location being searched
- ``radiusArcsec`` - the radius of the conesearch to perform against the TNS
- ``name`` -- name of the object to search the TNS for
- ``discInLastDays`` -- search the TNS for transient discovered in the last X days
- ``comments`` -- print the comments from the TNS, note these can be long making table outputs somewhat unreadable. Default *False*
**Usage:**
To initiate a search object to search the TNS via an object name (either TNS or survey names accepted):
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
name="Gaia16bbi"
)
or for a conesearch use something similar to:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
ra="06:50:36.74",
dec="+31:06:44.7",
radiusArcsec=5
)
Note the search method can accept coordinates in sexagesimal or decimal defree formats.
To list all new objects discovered in the last three weeks, then use:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
discInLastDays=21
)
"""
# Initialisation
    def __init__(
            self,
            log,
            ra="",
            dec="",
            radiusArcsec="",
            name="",
            discInLastDays="",
            settings=False,
            comments=False
    ):
        # NOTE: the constructor performs the actual TNS query (network I/O)
        # before returning, and caches all four result sets on the instance.
        self.log = log
        log.debug("instansiating a new 'search' object")
        self.settings = settings
        self.ra = ra
        self.dec = dec
        self.radiusArcsec = radiusArcsec
        self.comments = comments
        self.name = name
        self.internal_name = ""
        self.discInLastDays = discInLastDays
        # `page` tracks the TNS result pagination; `batchSize` is the number
        # of rows requested per results page
        self.page = 0
        self.batchSize = 1000

        # CREATE THE TIME-RANGE WINDOW TO SEARCH TNS
        if not discInLastDays:
            self.start = ""
            self.end = ""
        else:
            discInLastDays = int(discInLastDays)
            # END TOMORROW SO TODAY'S DISCOVERIES ARE INCLUDED IN THE WINDOW
            td = timedelta(days=1)
            end = datetime.now() + td
            self.end = end.strftime("%Y-%m-%d")
            td = timedelta(days=discInLastDays)
            start = datetime.now() - td
            self.start = start.strftime("%Y-%m-%d")

        # DETERMINE IF WE HAVE A TNS OR INTERAL SURVEY NAME
        # TNS names look like "SN 2016asf", "AT2017abc" or bare "2016asf";
        # anything else is treated as an internal survey name
        if self.name:
            matchObject = re.match(r'^((SN|AT) ?)?(\d{4}\w{1,6})', self.name)

            if matchObject:
                self.name = matchObject.group(3)
            else:
                self.internal_name = self.name
                self.name = ""

        # DO THE SEARCH OF THE TNS AND COMPILE THE RESULTS INTO SEPARATE RESULT
        # SETS
        self.sourceResultsList, self.photResultsList, self.specResultsList, self.relatedFilesResultsList = self._query_tns()

        # WRAP EACH RAW LIST IN A fundamentals `list_of_dictionaries` RENDERER
        # (used by the csv/json/yaml/markdown/table methods)
        self.sourceResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.sourceResultsList
        )
        self.photResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.photResultsList
        )
        self.specResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.specResultsList
        )
        self.relatedFilesResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.relatedFilesResultsList
        )

        return None
@property
def sources(
self):
"""*The results of the search returned as a python list of dictionaries*
**Usage:**
.. code-block:: python
sources = tns.sources
"""
sourceResultsList = []
sourceResultsList[:] = [dict(l) for l in self.sourceResultsList]
return sourceResultsList
@property
def spectra(
self):
"""*The associated source spectral data*
**Usage:**
.. code-block:: python
sourceSpectra = tns.spectra
"""
specResultsList = []
specResultsList[:] = [dict(l) for l in self.specResultsList]
return specResultsList
@property
def files(
self):
"""*The associated source files*
**Usage:**
.. code-block:: python
sourceFiles = tns.files
"""
relatedFilesResultsList = []
relatedFilesResultsList[:] = [dict(l)
for l in self.relatedFilesResultsList]
return relatedFilesResultsList
@property
def photometry(
self):
"""*The associated source photometry*
**Usage:**
.. code-block:: python
sourcePhotometry = tns.photometry
"""
photResultsList = []
photResultsList[:] = [dict(l) for l in self.photResultsList]
return photResultsList
@property
def url(
self):
"""*The generated URL used for searching of the TNS*
**Usage:**
.. code-block:: python
searchURL = tns.url
"""
return self._searchURL
def csv(
self,
dirPath=None):
"""*Render the results in csv format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `csvSources` -- the top-level transient data
- `csvPhot` -- all photometry associated with the transients
- `csvSpec` -- all spectral data associated with the transients
- `csvFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in csv format:
.. code-block:: python
csvSources, csvPhot, csvSpec, csvFiles = tns.csv()
print csvSources
.. code-block:: text
TNSId,TNSName,discoveryName,discSurvey,raSex,decSex,raDeg,decDeg,transRedshift,specType,discMag,discMagFilter,discDate,objectUrl,hostName,hostRedshift,separationArcsec,separationNorthArcsec,separationEastArcsec
2016asf,SN2016asf,ASASSN-16cs,ASAS-SN,06:50:36.73,+31:06:45.36,102.6530,31.1126,0.021,SN Ia,17.1,V-Johnson,2016-03-06 08:09:36,http://wis-tns.weizmann.ac.il/object/2016asf,KUG 0647+311,,0.66,0.65,-0.13
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.csv("~/tns")
.. image:: https://i.imgur.com/BwwqMBg.png
:width: 800px
:alt: csv output
"""
if dirPath:
p = self._file_prefix()
csvSources = self.sourceResults.csv(
filepath=dirPath + "/" + p + "sources.csv")
csvPhot = self.photResults.csv(
filepath=dirPath + "/" + p + "phot.csv")
csvSpec = self.specResults.csv(
filepath=dirPath + "/" + p + "spec.csv")
csvFiles = self.relatedFilesResults.csv(
filepath=dirPath + "/" + p + "relatedFiles.csv")
else:
csvSources = self.sourceResults.csv()
csvPhot = self.photResults.csv()
csvSpec = self.specResults.csv()
csvFiles = self.relatedFilesResults.csv()
return csvSources, csvPhot, csvSpec, csvFiles
def json(
self,
dirPath=None):
"""*Render the results in json format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `jsonSources` -- the top-level transient data
- `jsonPhot` -- all photometry associated with the transients
- `jsonSpec` -- all spectral data associated with the transients
- `jsonFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in json format:
.. code-block:: python
jsonSources, jsonPhot, jsonSpec, jsonFiles = tns.json()
print jsonSources
.. code-block:: text
[
{
"TNSId": "2016asf",
"TNSName": "SN2016asf",
"decDeg": 31.1126,
"decSex": "+31:06:45.36",
"discDate": "2016-03-06 08:09:36",
"discMag": "17.1",
"discMagFilter": "V-Johnson",
"discSurvey": "ASAS-SN",
"discoveryName": "ASASSN-16cs",
"hostName": "KUG 0647+311",
"hostRedshift": null,
"objectUrl": "http://wis-tns.weizmann.ac.il/object/2016asf",
"raDeg": 102.65304166666667,
"raSex": "06:50:36.73",
"separationArcsec": "0.66",
"separationEastArcsec": "-0.13",
"separationNorthArcsec": "0.65",
"specType": "SN Ia",
"transRedshift": "0.021"
}
]
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.json("~/tns")
.. image:: https://i.imgur.com/wAHqARI.png
:width: 800px
:alt: json output
"""
if dirPath:
p = self._file_prefix()
jsonSources = self.sourceResults.json(
filepath=dirPath + "/" + p + "sources.json")
jsonPhot = self.photResults.json(
filepath=dirPath + "/" + p + "phot.json")
jsonSpec = self.specResults.json(
filepath=dirPath + "/" + p + "spec.json")
jsonFiles = self.relatedFilesResults.json(
filepath=dirPath + "/" + p + "relatedFiles.json")
else:
jsonSources = self.sourceResults.json()
jsonPhot = self.photResults.json()
jsonSpec = self.specResults.json()
jsonFiles = self.relatedFilesResults.json()
return jsonSources, jsonPhot, jsonSpec, jsonFiles
def yaml(
self,
dirPath=None):
"""*Render the results in yaml format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `yamlSources` -- the top-level transient data
- `yamlPhot` -- all photometry associated with the transients
- `yamlSpec` -- all spectral data associated with the transients
- `yamlFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in yaml format:
.. code-block:: python
yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml()
print yamlSources
.. code-block:: text
- TNSId: 2016asf
TNSName: SN2016asf
decDeg: 31.1126
decSex: '+31:06:45.36'
discDate: '2016-03-06 08:09:36'
discMag: '17.1'
discMagFilter: V-Johnson
discSurvey: ASAS-SN
discoveryName: ASASSN-16cs
hostName: KUG 0647+311
hostRedshift: null
objectUrl: http://wis-tns.weizmann.ac.il/object/2016asf
raDeg: 102.65304166666667
raSex: '06:50:36.73'
separationArcsec: '0.66'
separationEastArcsec: '-0.13'
separationNorthArcsec: '0.65'
specType: SN Ia
transRedshift: '0.021'
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.yaml("~/tns")
.. image:: https://i.imgur.com/ZpJIC6p.png
:width: 800px
:alt: yaml output
"""
if dirPath:
p = self._file_prefix()
yamlSources = self.sourceResults.yaml(
filepath=dirPath + "/" + p + "sources.yaml")
yamlPhot = self.photResults.yaml(
filepath=dirPath + "/" + p + "phot.yaml")
yamlSpec = self.specResults.yaml(
filepath=dirPath + "/" + p + "spec.yaml")
yamlFiles = self.relatedFilesResults.yaml(
filepath=dirPath + "/" + p + "relatedFiles.yaml")
else:
yamlSources = self.sourceResults.yaml()
yamlPhot = self.photResults.yaml()
yamlSpec = self.specResults.yaml()
yamlFiles = self.relatedFilesResults.yaml()
return yamlSources, yamlPhot, yamlSpec, yamlFiles
def markdown(
self,
dirPath=None):
"""*Render the results in markdown format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `markdownSources` -- the top-level transient data
- `markdownPhot` -- all photometry associated with the transients
- `markdownSpec` -- all spectral data associated with the transients
- `markdownFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in markdown table format:
.. code-block:: python
markdownSources, markdownPhot, markdownSpec, markdownFiles = tns.markdown()
print markdownSources
.. code-block:: text
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
|:---------|:-----------|:---------------|:------------|:-------------|:--------------|:----------|:---------|:---------------|:----------|:---------|:---------------|:---------------------|:----------------------------------------------|:--------------|:--------------|:------------------|:-----------------------|:----------------------|
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.markdown("~/tns")
.. image:: https://i.imgur.com/AYLBQoJ.png
:width: 800px
:alt: markdown output
"""
if dirPath:
p = self._file_prefix()
markdownSources = self.sourceResults.markdown(
filepath=dirPath + "/" + p + "sources.md")
markdownPhot = self.photResults.markdown(
filepath=dirPath + "/" + p + "phot.md")
markdownSpec = self.specResults.markdown(
filepath=dirPath + "/" + p + "spec.md")
markdownFiles = self.relatedFilesResults.markdown(
filepath=dirPath + "/" + p + "relatedFiles.md")
else:
markdownSources = self.sourceResults.markdown()
markdownPhot = self.photResults.markdown()
markdownSpec = self.specResults.markdown()
markdownFiles = self.relatedFilesResults.markdown()
return markdownSources, markdownPhot, markdownSpec, markdownFiles
def table(
self,
dirPath=None):
"""*Render the results as an ascii table*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `tableSources` -- the top-level transient data
- `tablePhot` -- all photometry associated with the transients
- `tableSpec` -- all spectral data associated with the transients
- `tableFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in ascii table format:
.. code-block:: python
tableSources, tablePhot, tableSpec, tableFiles = tns.table()
print tableSources
.. code-block:: text
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.table("~/tns")
.. image:: https://i.imgur.com/m09M0ho.png
:width: 800px
:alt: ascii files
"""
if dirPath:
p = self._file_prefix()
tableSources = self.sourceResults.table(
filepath=dirPath + "/" + p + "sources.ascii")
tablePhot = self.photResults.table(
filepath=dirPath + "/" + p + "phot.ascii")
tableSpec = self.specResults.table(
filepath=dirPath + "/" + p + "spec.ascii")
tableFiles = self.relatedFilesResults.table(
filepath=dirPath + "/" + p + "relatedFiles.ascii")
else:
tableSources = self.sourceResults.table()
tablePhot = self.photResults.table()
tableSpec = self.specResults.table()
tableFiles = self.relatedFilesResults.table()
return tableSources, tablePhot, tableSpec, tableFiles
    def _query_tns(self):
        """
        *determine how to query the TNS, send query and parse the results*

        Pages through the TNS results (``self.batchSize`` rows per page),
        parses every transient row into four flat tables and finally sorts
        the sources by separation from the search coordinates (when the
        rows carry a ``separationArcsec`` key).

        **Return:**
            - ``sourceTable`` -- one dictionary per matched transient
            - ``photoTable`` -- photometry rows, keyed back via ``TNSId``
            - ``specTable`` -- spectral rows, keyed back via ``TNSId``
            - ``relatedFilesTable`` -- related-file rows, keyed back via ``TNSId``

            (or ``None`` if the TNS returns a non-200 HTTP status)
        """
        self.log.info('starting the ``get`` method')

        sourceTable = []
        photoTable = []
        specTable = []
        relatedFilesTable = []

        # THIS stop IS TO KEEP TRACK OF THE TNS PAGINATION IF MANY RESULT PAGES
        # ARE RETURNED
        stop = False
        sourceCount = 0
        while not stop:
            status_code, content, self._searchURL = self._get_tns_search_results()
            if status_code != 200:
                self.log.error(
                    'cound not get the search reuslts from the TNS, HTML error code %(status_code)s ' % locals())
                return None
            if "No results found" in content:
                print "No results found"
                return sourceTable, photoTable, specTable, relatedFilesTable

            # A SHORT PAGE MEANS THIS IS THE LAST PAGE; OTHERWISE REQUEST THE
            # NEXT PAGE ON THE FOLLOWING LOOP ITERATION (with a polite pause)
            if self._parse_transient_rows(content, True) < self.batchSize:
                stop = True
            else:
                self.page += 1
                thisPage = self.page
                print "Downloaded %(thisPage)s page(s) from the TNS. %(sourceCount)s transients parsed so far." % locals()
                sourceCount += self.batchSize
                print "\t" + self._searchURL
                timesleep.sleep(1)

            # PARSE ALL ROWS RETURNED
            for transientRow in self._parse_transient_rows(content):
                # TOP LEVEL DISCOVERY CONTENT
                sourceContent = transientRow.group()
                discInfo, TNSId = self._parse_discovery_information(
                    sourceContent)
                sourceTable.append(discInfo)

                # PHOTOMETERY
                phot, relatedFiles = self._parse_photometry_data(
                    sourceContent, TNSId)
                photoTable += phot
                relatedFilesTable += relatedFiles

                # SPECTRA
                spec, relatedFiles = self._parse_spectral_data(
                    sourceContent, TNSId)
                specTable += spec
                relatedFilesTable += relatedFiles

        # SORT BY SEPARATION FROM THE SEARCH COORDINATES
        # (rows only carry `separationArcsec` for coordinate searches, so a
        # missing key is silently tolerated and the order is left as-is)
        try:
            sourceTable = sorted(sourceTable, key=itemgetter(
                'separationArcsec'), reverse=False)
        except:
            pass

        self.log.info('completed the ``get`` method')
        return sourceTable, photoTable, specTable, relatedFilesTable
def _get_tns_search_results(
self):
"""
*query the tns and result the response*
"""
self.log.info('starting the ``_get_tns_search_results`` method')
try:
response = requests.get(
url="http://wis-tns.weizmann.ac.il/search",
params={
"page": self.page,
"ra": self.ra,
"decl": self.dec,
"radius": self.radiusArcsec,
"name": self.name,
"internal_name": self.internal_name,
"date_start[date]": self.start,
"date_end[date]": self.end,
"num_page": self.batchSize,
"display[redshift]": "1",
"display[hostname]": "1",
"display[host_redshift]": "1",
"display[source_group_name]": "1",
"display[internal_name]": "1",
"display[spectra_count]": "1",
"display[discoverymag]": "1",
"display[discmagfilter]": "1",
"display[discoverydate]": "1",
"display[discoverer]": "1",
"display[sources]": "1",
"display[bibcode]": "1",
},
)
except requests.exceptions.RequestException:
print('HTTP Request failed')
self.log.info('completed the ``_get_tns_search_results`` method')
return response.status_code, response.content, response.url
def _file_prefix(
self):
"""*Generate a file prefix based on the type of search for saving files to disk*
**Return:**
- ``prefix`` -- the file prefix
"""
self.log.info('starting the ``_file_prefix`` method')
if self.ra:
now = datetime.now()
prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_conesearch_")
elif self.name:
prefix = self.name + "_tns_conesearch_"
elif self.internal_name:
prefix = self.internal_name + "_tns_conesearch_"
elif self.discInLastDays:
discInLastDays = str(self.discInLastDays)
now = datetime.now()
prefix = now.strftime(
discInLastDays + "d_since_%Y%m%d_tns_conesearch_")
self.log.info('completed the ``_file_prefix`` method')
return prefix
def _parse_transient_rows(
self,
content,
count=False):
"""* parse transient rows from the TNS result page content*
**Key Arguments:**
- ``content`` -- the content from the TNS results page.
- ``count`` -- return only the number of rows
**Return:**
- ``transientRows``
"""
self.log.info('starting the ``_parse_transient_rows`` method')
regexForRow = r"""\n([^\n]*?<a href="/object/.*?)(?=\n[^\n]*?<a href="/object/|<\!\-\- /\.section, /#content \-\->)"""
if count:
# A SINGLE SOURCE BLOCK
matchedSources = re.findall(
regexForRow,
content,
flags=re.S # re.S
)
return len(matchedSources)
# A SINGLE SOURCE BLOCK
matchedSources = re.finditer(
regexForRow,
content,
flags=re.S # re.S
)
self.log.info('completed the ``_parse_transient_rows`` method')
return matchedSources
def _parse_discovery_information(
self,
content):
"""* parse discovery information from one row on the TNS results page*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page.
**Return:**
- ``discoveryData`` -- dictionary of results
- ``TNSId`` -- the unique TNS id for the transient
"""
self.log.info('starting the ``_parse_discovery_information`` method')
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
matches = re.finditer(
r"""<tr class="row-.*?"><td class="cell-id">(?P<tnsId>\d*?)</td><td class="cell-name"><a href="(?P<objectUrl>.*?)">(?P<TNSName>.*?)</a></td><td class="cell-.*?<td class="cell-ra">(?P<raSex>.*?)</td><td class="cell-decl">(?P<decSex>.*?)</td><td class="cell-ot_name">(?P<specType>.*?)</td><td class="cell-redshift">(?P<transRedshift>.*?)</td><td class="cell-hostname">(?P<hostName>.*?)</td><td class="cell-host_redshift">(?P<hostRedshift>.*?)</td><td class="cell-source_group_name">(?P<discSurvey>.*?)</td>.*?<td class="cell-internal_name">(<a.*?>)?(?P<discoveryName>.*?)(</a>)?</td>.*?<td class="cell-discoverymag">(?P<discMag>.*?)</td><td class="cell-disc_filter_name">(?P<discMagFilter>.*?)</td><td class="cell-discoverydate">(?P<discDate>.*?)</td><td class="cell-discoverer">(?P<sender>.*?)</td>.*?</tr>""",
content,
flags=0 # re.S
)
discoveryData = []
for match in matches:
row = match.groupdict()
for k, v in row.iteritems():
row[k] = v.strip()
if len(v) == 0:
row[k] = None
if row["transRedshift"] == 0:
row["transRedshift"] = None
if row["TNSName"][0] in ["1", "2"]:
row["TNSName"] = "SN" + row["TNSName"]
row["objectUrl"] = "http://wis-tns.weizmann.ac.il" + \
row["objectUrl"]
# CONVERT COORDINATES TO DECIMAL DEGREES
row["raDeg"] = converter.ra_sexegesimal_to_decimal(
ra=row["raSex"]
)
row["decDeg"] = converter.dec_sexegesimal_to_decimal(
dec=row["decSex"]
)
# IF THIS IS A COORDINATE SEARCH, ADD SEPARATION FROM
# ORIGINAL QUERY COORDINATES
if self.ra:
# CALCULATE SEPARATION IN ARCSEC
from astrocalc.coords import separations
calculator = separations(
log=self.log,
ra1=self.ra,
dec1=self.dec,
ra2=row["raDeg"],
dec2=row["decDeg"],
)
angularSeparation, north, east = calculator.get()
row["separationArcsec"] = angularSeparation
row["separationNorthArcsec"] = north
row["separationEastArcsec"] = east
if not row["discSurvey"]:
row["survey"] = row["sender"]
del row["sender"]
del row["tnsId"]
row["TNSName"] = row["TNSName"].replace(" ", "")
row["TNSId"] = row["TNSName"].replace(
"SN", "").replace("AT", "")
TNSId = row["TNSId"]
# ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
orow = collections.OrderedDict()
keyOrder = ["TNSId", "TNSName", "discoveryName", "discSurvey", "raSex", "decSex", "raDeg", "decDeg",
"transRedshift", "specType", "discMag", "discMagFilter", "discDate", "objectUrl", "hostName", "hostRedshift", "separationArcsec", "separationNorthArcsec", "separationEastArcsec"]
for k, v in row.iteritems():
if k not in keyOrder:
keyOrder.append(k)
for k in keyOrder:
try:
orow[k] = row[k]
except:
self.log.info(
"`%(k)s` not found in the source data for %(TNSId)s" % locals())
pass
discoveryData.append(row)
self.log.info('completed the ``_parse_discovery_information`` method')
return discoveryData[0], TNSId
    def _parse_photometry_data(
            self,
            content,
            TNSId):
        """*parse photometry data from a row in the tns results content*

        Scans the "AT reportings" block of a transient's row, extracting one
        header per report and one photometry dictionary per observation, and
        collects any related files attached to the reports.

        **Key Arguments:**
            - ``content`` -- a table row from the TNS results page
            - ``TNSId`` -- the tns id of the transient

        **Return:**
            - ``photData`` -- a list of (ordered) dictionaries of the photometry data
            - ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files
        """
        self.log.info('starting the ``_parse_photometry_data`` method')

        photData = []
        relatedFilesTable = []

        # AT REPORT BLOCK
        # everything between the "AT reportings" header and the
        # "Classification reportings" header (or end of content)
        ATBlock = re.search(
            r"""<tr class=[^\n]*?AT reportings.*?(?=<tr class=[^\n]*?Classification reportings|$)""",
            content,
            flags=re.S  # re.S
        )

        if ATBlock:
            ATBlock = ATBlock.group()
            # ONE MATCH PER SUBMITTED AT REPORT
            reports = re.finditer(
                r"""<tr class="row-[^"]*"><td class="cell-id">.*?</table>""",
                ATBlock,
                flags=re.S  # re.S
            )
            relatedFiles = self._parse_related_files(ATBlock)

            for r in reports:
                # REPORT-LEVEL METADATA (sender, survey, comment ...)
                header = re.search(
                    r"""<tr class="row[^"]*".*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<sender>[^<]*).*?reporter_name">(?P<reporters>[^<]*).*?source_group_name">(?P<surveyGroup>[^<]*).*?ra">(?P<ra>[^<]*).*?decl">(?P<dec>[^<]*).*?discovery_date">(?P<obsDate>[^<]*).*?flux">(?P<mag>[^<]*).*?filter_name">(?P<magFilter>[^<]*).*?related_files">(?P<relatedFiles>[^<]*).*?type_name">(?P<suggestedType>[^<]*).*?hostname">(?P<hostName>[^<]*).*?host_redshift">(?P<hostRedshift>[^<]*).*?internal_name">(?P<objectName>[^<]*).*?groups">(?P<survey>[^<]*).*?remarks">(?P<sourceComment>[^<]*)""",
                    r.group(),
                    flags=0  # re.S
                )
                try:
                    header = header.groupdict()
                except:
                    # NOTE(review): if the header regex fails to match,
                    # `header` is None and this dumps the raw report for
                    # debugging; the subsequent code will then raise
                    print r.group()

                header["TNSId"] = TNSId
                # DROP FIELDS ALREADY CAPTURED AT SOURCE LEVEL OR PER-OBSERVATION
                del header["reporters"]
                del header["surveyGroup"]
                del header["hostName"]
                del header["hostRedshift"]
                del header["mag"]
                del header["magFilter"]
                del header["obsDate"]
                del header["ra"]
                del header["dec"]

                if not self.comments:
                    del header['sourceComment']
                else:
                    # FLATTEN MULTI-LINE COMMENTS AND CAP AT 750 CHARACTERS
                    theseComments = header[
                        "sourceComment"].split("\n")
                    header["sourceComment"] = ""
                    for c in theseComments:
                        header["sourceComment"] += " " + c.strip()
                    header["sourceComment"] = header[
                        "sourceComment"].strip().replace('"', "'")[0:750]

                # ONE MATCH PER PHOTOMETRY OBSERVATION WITHIN THIS REPORT
                phot = re.finditer(
                    r"""<tr class="row\-[^"]*".*?obsdate">(?P<obsdate>[^<]*).*?flux">(?P<mag>[^<]*).*?fluxerr">(?P<magErr>[^<]*).*?limflux">(?P<limitingMag>[^<]*).*?unit_name">(?P<magUnit>[^<]*).*?filter_name">(?P<filter>[^<]*).*?tel_inst">(?P<telescope>[^<]*).*?exptime">(?P<exptime>[^<]*).*?observer">(?P<observer>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                    r.group(),
                    flags=0  # re.S
                )
                filesAppended = False
                for p in phot:
                    p = p.groupdict()
                    del p["observer"]
                    # A ROW WITH ONLY A LIMITING FLUX IS A NON-DETECTION:
                    # recorded with limitingMag=1 and the limit in `mag`
                    if p["limitingMag"] and not p["mag"]:
                        p["mag"] = p["limitingMag"]
                        p["limitingMag"] = 1
                        p["remarks"] = p["remarks"].replace(
                            "[Last non detection]", "")
                    else:
                        p["limitingMag"] = 0

                    if not self.comments:
                        del p["remarks"]

                    # MERGE THE REPORT-LEVEL METADATA INTO THIS OBSERVATION
                    p.update(header)

                    # ATTACH THE REPORT'S RELATED FILES ONCE (first obs only)
                    if p["relatedFiles"] and filesAppended == False:
                        filesAppended = True
                        for f in relatedFiles:
                            # ORDER THE DICTIONARY FOR THIS ROW OF
                            # RESULTS
                            thisFile = collections.OrderedDict()
                            thisFile["TNSId"] = TNSId
                            thisFile["filename"] = f[
                                "filepath"].split("/")[-1]
                            thisFile["url"] = f["filepath"]
                            if self.comments:
                                thisFile["comment"] = f[
                                    "fileComment"].replace("\n", " ").strip().replace('"', "'")[0:750]
                            thisFile["dateObs"] = p["obsdate"]
                            # 2 FLAGS A PHOTOMETRY-ASSOCIATED FILE
                            thisFile["spec1phot2"] = 2
                            relatedFilesTable.append(thisFile)

                    # FALL BACK TO THE SENDER AS THE SURVEY NAME
                    if not p["survey"] and not p["objectName"]:
                        p["survey"] = p["sender"]
                    del p["relatedFiles"]
                    del p["sender"]

                    # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                    orow = collections.OrderedDict()
                    keyOrder = ["TNSId", "survey", "obsdate", "filter", "limitingMag", "mag", "magErr",
                                "magUnit", "suggestedType", "telescope", "exptime", "reportAddedDate"]
                    for k, v in p.iteritems():
                        if k not in keyOrder:
                            keyOrder.append(k)
                    for k in keyOrder:
                        try:
                            orow[k] = p[k]
                        except:
                            self.log.info(
                                "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                            pass
                    photData.append(orow)

        self.log.info('completed the ``_parse_photometry_data`` method')
        return photData, relatedFilesTable
def _parse_related_files(
self,
content):
"""*parse the contents for related files URLs and comments*
**Key Arguments:**
- ``content`` -- the content to parse.
**Return:**
- ``relatedFiles`` -- a list of dictionaries of transient related files
"""
self.log.info('starting the ``_parse_related_files`` method')
relatedFilesList = re.finditer(
r"""<td class="cell-filename">.*?href="(?P<filepath>[^"]*).*?remarks">(?P<fileComment>[^<]*)""",
content,
flags=0 # re.S
)
relatedFiles = []
for f in relatedFilesList:
f = f.groupdict()
relatedFiles.append(f)
self.log.info('completed the ``_parse_related_files`` method')
return relatedFiles
    def _parse_spectral_data(
            self,
            content,
            TNSId):
        """*parse spectra data from a row in the tns results content*

        Scans the "Classification reportings" block of a transient's row,
        extracting one header per classification report and one spectrum
        dictionary per observation, and collects related/ascii/fits files.

        **Key Arguments:**
            - ``content`` -- a table row from the TNS results page
            - ``TNSId`` -- the tns id of the transient

        **Return:**
            - ``specData`` -- a list of (ordered) dictionaries of the spectral data
            - ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files
        """
        self.log.info('starting the ``_parse_spectral_data`` method')

        specData = []
        relatedFilesTable = []

        # CLASSIFICATION BLOCK
        # everything from the "Classification reportings" header onwards
        classBlock = re.search(
            r"""<tr class=[^\n]*?Classification reportings.*$""",
            content,
            flags=re.S  # re.S
        )

        if classBlock:
            classBlock = classBlock.group()
            # ONE MATCH PER SUBMITTED CLASSIFICATION REPORT
            reports = re.finditer(
                r"""<tr class="row-[^"]*"><td class="cell-id">.*?</tbody>\s*</table>\s*</div></td> </tr>\s*</tbody>\s*</table>\s*</div></td> </tr>""",
                classBlock,
                flags=re.S  #
            )
            relatedFiles = self._parse_related_files(classBlock)

            for r in reports:
                # REPORT-LEVEL METADATA (classifier, type, redshift, comment)
                header = re.search(
                    r"""<tr class="row.*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<TNSuser>[^<]*).*?classifier_name">(?P<reporters>[^<]*).*?source_group_name">(?P<survey>[^<]*).*?-type">(?P<specType>[^<]*).*?-redshift">(?P<transRedshift>[^<]*).*?-related_files">(?P<relatedFiles>[^<]*).*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<sourceComment>[^<]*)</td>""",
                    r.group(),
                    flags=re.S  # re.S
                )
                # SKIP REPORTS THAT DO NOT MATCH THE EXPECTED LAYOUT
                if not header:
                    continue
                header = header.groupdict()
                header["TNSId"] = TNSId
                del header["reporters"]
                del header["surveyGroup"]
                del header["survey"]

                if not self.comments:
                    del header['sourceComment']
                else:
                    # FLATTEN MULTI-LINE COMMENTS AND CAP AT 750 CHARACTERS
                    theseComments = header[
                        "sourceComment"].split("\n")
                    header["sourceComment"] = ""
                    for c in theseComments:
                        header["sourceComment"] += " " + c.strip()
                    header["sourceComment"] = header[
                        "sourceComment"].strip().replace('"', "'")[0:750]

                # ONE MATCH PER SPECTRUM WITHIN THIS REPORT (ascii/fits file
                # links are optional groups)
                spec = re.finditer(
                    r"""<tr class="class-results-.*?-obsdate">(?P<obsdate>[^<]*).*?-tel_inst">(?P<telescope>[^<]*).*?-exptime">(?P<exptime>[^<]*).*?-observer">(?P<sender>[^<]*).*?-reducer">(?P<reducer>[^<]*).*?-source_group_name">(?P<survey>[^<]*).*?-asciifile">(.*?<a href="(?P<filepath>[^"]*)".*?</a>)?.*?-fitsfile">(.*?<a href="(?P<fitsFilepath>[^"]*)".*?</a>)?.*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                    r.group(),
                    flags=0  # re.S
                )
                filesAppended = False
                for s in spec:
                    s = s.groupdict()
                    del s["sender"]
                    del s["surveyGroup"]
                    del s["reducer"]
                    if not self.comments:
                        del s["remarks"]
                    else:
                        s["remarks"] = s["remarks"].replace('"', "'")[0:750]
                    # MERGE THE REPORT-LEVEL METADATA INTO THIS SPECTRUM
                    s.update(header)

                    # ATTACH THE REPORT'S RELATED FILES ONCE (first spectrum only)
                    if s["relatedFiles"] and filesAppended == False:
                        filesAppended = True
                        for f in relatedFiles:
                            # ORDER THE DICTIONARY FOR THIS ROW OF
                            # RESULTS
                            thisFile = collections.OrderedDict()
                            thisFile["TNSId"] = TNSId
                            thisFile["filename"] = f[
                                "filepath"].split("/")[-1]
                            thisFile["url"] = f["filepath"]
                            if self.comments:
                                thisFile["comment"] = f[
                                    "fileComment"].replace("\n", " ").strip()
                            thisFile["dateObs"] = s["obsdate"]
                            # 1 FLAGS A SPECTRUM-ASSOCIATED FILE
                            thisFile["spec1phot2"] = 1
                            relatedFilesTable.append(thisFile)

                    # THE SPECTRUM'S OWN ASCII AND FITS FILES (if any)
                    for ffile in [s["filepath"], s["fitsFilepath"]]:
                        if ffile:
                            # ORDER THE DICTIONARY FOR THIS ROW OF
                            # RESULTS
                            thisFile = collections.OrderedDict()
                            thisFile["TNSId"] = TNSId
                            thisFile["filename"] = ffile.split(
                                "/")[-1]
                            thisFile["url"] = ffile
                            if self.comments:
                                thisFile["comment"] = ""
                            thisFile["dateObs"] = s["obsdate"]
                            thisFile["spec1phot2"] = 1
                            relatedFilesTable.append(thisFile)
                    del s["filepath"]
                    del s["fitsFilepath"]
                    del s["relatedFiles"]

                    # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                    orow = collections.OrderedDict()
                    keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
                                "telescope", "exptime", "reportAddedDate", "TNSuser"]
                    for k, v in s.iteritems():
                        if k not in keyOrder:
                            keyOrder.append(k)
                    for k in keyOrder:
                        try:
                            orow[k] = s[k]
                        except:
                            self.log.info(
                                "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                            pass
                    specData.append(orow)

        self.log.info('completed the ``_parse_spectral_data`` method')
        return specData, relatedFilesTable
|
thespacedoctor/transientNamer
|
transientNamer/search.py
|
search._query_tns
|
python
|
def _query_tns(self):
self.log.info('starting the ``get`` method')
sourceTable = []
photoTable = []
specTable = []
relatedFilesTable = []
# THIS stop IS TO KEEP TRACK OF THE TNS PAGINATION IF MANY RESULT PAGES
# ARE RETURNED
stop = False
sourceCount = 0
while not stop:
status_code, content, self._searchURL = self._get_tns_search_results()
if status_code != 200:
self.log.error(
'cound not get the search reuslts from the TNS, HTML error code %(status_code)s ' % locals())
return None
if "No results found" in content:
print "No results found"
return sourceTable, photoTable, specTable, relatedFilesTable
if self._parse_transient_rows(content, True) < self.batchSize:
stop = True
else:
self.page += 1
thisPage = self.page
print "Downloaded %(thisPage)s page(s) from the TNS. %(sourceCount)s transients parsed so far." % locals()
sourceCount += self.batchSize
print "\t" + self._searchURL
timesleep.sleep(1)
# PARSE ALL ROWS RETURNED
for transientRow in self._parse_transient_rows(content):
# TOP LEVEL DISCOVERY CONTENT
sourceContent = transientRow.group()
discInfo, TNSId = self._parse_discovery_information(
sourceContent)
sourceTable.append(discInfo)
# PHOTOMETERY
phot, relatedFiles = self._parse_photometry_data(
sourceContent, TNSId)
photoTable += phot
relatedFilesTable += relatedFiles
# SPECTRA
spec, relatedFiles = self._parse_spectral_data(
sourceContent, TNSId)
specTable += spec
relatedFilesTable += relatedFiles
# SORT BY SEPARATION FROM THE SEARCH COORDINATES
try:
sourceTable = sorted(sourceTable, key=itemgetter(
'separationArcsec'), reverse=False)
except:
pass
self.log.info('completed the ``get`` method')
return sourceTable, photoTable, specTable, relatedFilesTable
|
*determine how to query the TNS, send query and parse the results*
**Return:**
- ``results`` -- a list of dictionaries (one dictionary for each result set returned from the TNS)
|
train
|
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L706-L777
|
[
"def _get_tns_search_results(\n self):\n \"\"\"\n *query the tns and result the response*\n \"\"\"\n self.log.info('starting the ``_get_tns_search_results`` method')\n\n try:\n response = requests.get(\n url=\"http://wis-tns.weizmann.ac.il/search\",\n params={\n \"page\": self.page,\n \"ra\": self.ra,\n \"decl\": self.dec,\n \"radius\": self.radiusArcsec,\n \"name\": self.name,\n \"internal_name\": self.internal_name,\n \"date_start[date]\": self.start,\n \"date_end[date]\": self.end,\n \"num_page\": self.batchSize,\n \"display[redshift]\": \"1\",\n \"display[hostname]\": \"1\",\n \"display[host_redshift]\": \"1\",\n \"display[source_group_name]\": \"1\",\n \"display[internal_name]\": \"1\",\n \"display[spectra_count]\": \"1\",\n \"display[discoverymag]\": \"1\",\n \"display[discmagfilter]\": \"1\",\n \"display[discoverydate]\": \"1\",\n \"display[discoverer]\": \"1\",\n \"display[sources]\": \"1\",\n \"display[bibcode]\": \"1\",\n },\n )\n\n except requests.exceptions.RequestException:\n print('HTTP Request failed')\n\n self.log.info('completed the ``_get_tns_search_results`` method')\n return response.status_code, response.content, response.url\n",
"def _parse_transient_rows(\n self,\n content,\n count=False):\n \"\"\"* parse transient rows from the TNS result page content*\n\n **Key Arguments:**\n - ``content`` -- the content from the TNS results page.\n - ``count`` -- return only the number of rows\n\n **Return:**\n - ``transientRows``\n \"\"\"\n self.log.info('starting the ``_parse_transient_rows`` method')\n\n regexForRow = r\"\"\"\\n([^\\n]*?<a href=\"/object/.*?)(?=\\n[^\\n]*?<a href=\"/object/|<\\!\\-\\- /\\.section, /#content \\-\\->)\"\"\"\n\n if count:\n # A SINGLE SOURCE BLOCK\n matchedSources = re.findall(\n regexForRow,\n content,\n flags=re.S # re.S\n )\n return len(matchedSources)\n\n # A SINGLE SOURCE BLOCK\n matchedSources = re.finditer(\n regexForRow,\n content,\n flags=re.S # re.S\n )\n\n self.log.info('completed the ``_parse_transient_rows`` method')\n return matchedSources\n",
"def _parse_discovery_information(\n self,\n content):\n \"\"\"* parse discovery information from one row on the TNS results page*\n\n **Key Arguments:**\n - ``content`` -- a table row from the TNS results page.\n\n **Return:**\n - ``discoveryData`` -- dictionary of results\n - ``TNSId`` -- the unique TNS id for the transient\n \"\"\"\n self.log.info('starting the ``_parse_discovery_information`` method')\n\n # ASTROCALC UNIT CONVERTER OBJECT\n converter = unit_conversion(\n log=self.log\n )\n\n matches = re.finditer(\n r\"\"\"<tr class=\"row-.*?\"><td class=\"cell-id\">(?P<tnsId>\\d*?)</td><td class=\"cell-name\"><a href=\"(?P<objectUrl>.*?)\">(?P<TNSName>.*?)</a></td><td class=\"cell-.*?<td class=\"cell-ra\">(?P<raSex>.*?)</td><td class=\"cell-decl\">(?P<decSex>.*?)</td><td class=\"cell-ot_name\">(?P<specType>.*?)</td><td class=\"cell-redshift\">(?P<transRedshift>.*?)</td><td class=\"cell-hostname\">(?P<hostName>.*?)</td><td class=\"cell-host_redshift\">(?P<hostRedshift>.*?)</td><td class=\"cell-source_group_name\">(?P<discSurvey>.*?)</td>.*?<td class=\"cell-internal_name\">(<a.*?>)?(?P<discoveryName>.*?)(</a>)?</td>.*?<td class=\"cell-discoverymag\">(?P<discMag>.*?)</td><td class=\"cell-disc_filter_name\">(?P<discMagFilter>.*?)</td><td class=\"cell-discoverydate\">(?P<discDate>.*?)</td><td class=\"cell-discoverer\">(?P<sender>.*?)</td>.*?</tr>\"\"\",\n content,\n flags=0 # re.S\n )\n discoveryData = []\n for match in matches:\n row = match.groupdict()\n for k, v in row.iteritems():\n row[k] = v.strip()\n if len(v) == 0:\n row[k] = None\n if row[\"transRedshift\"] == 0:\n row[\"transRedshift\"] = None\n if row[\"TNSName\"][0] in [\"1\", \"2\"]:\n row[\"TNSName\"] = \"SN\" + row[\"TNSName\"]\n row[\"objectUrl\"] = \"http://wis-tns.weizmann.ac.il\" + \\\n row[\"objectUrl\"]\n\n # CONVERT COORDINATES TO DECIMAL DEGREES\n row[\"raDeg\"] = converter.ra_sexegesimal_to_decimal(\n ra=row[\"raSex\"]\n )\n row[\"decDeg\"] = converter.dec_sexegesimal_to_decimal(\n 
dec=row[\"decSex\"]\n )\n\n # IF THIS IS A COORDINATE SEARCH, ADD SEPARATION FROM\n # ORIGINAL QUERY COORDINATES\n if self.ra:\n # CALCULATE SEPARATION IN ARCSEC\n from astrocalc.coords import separations\n calculator = separations(\n log=self.log,\n ra1=self.ra,\n dec1=self.dec,\n ra2=row[\"raDeg\"],\n dec2=row[\"decDeg\"],\n )\n angularSeparation, north, east = calculator.get()\n row[\"separationArcsec\"] = angularSeparation\n row[\"separationNorthArcsec\"] = north\n row[\"separationEastArcsec\"] = east\n\n if not row[\"discSurvey\"]:\n row[\"survey\"] = row[\"sender\"]\n\n del row[\"sender\"]\n del row[\"tnsId\"]\n row[\"TNSName\"] = row[\"TNSName\"].replace(\" \", \"\")\n row[\"TNSId\"] = row[\"TNSName\"].replace(\n \"SN\", \"\").replace(\"AT\", \"\")\n TNSId = row[\"TNSId\"]\n\n # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS\n orow = collections.OrderedDict()\n keyOrder = [\"TNSId\", \"TNSName\", \"discoveryName\", \"discSurvey\", \"raSex\", \"decSex\", \"raDeg\", \"decDeg\",\n \"transRedshift\", \"specType\", \"discMag\", \"discMagFilter\", \"discDate\", \"objectUrl\", \"hostName\", \"hostRedshift\", \"separationArcsec\", \"separationNorthArcsec\", \"separationEastArcsec\"]\n for k, v in row.iteritems():\n if k not in keyOrder:\n keyOrder.append(k)\n for k in keyOrder:\n try:\n orow[k] = row[k]\n except:\n self.log.info(\n \"`%(k)s` not found in the source data for %(TNSId)s\" % locals())\n pass\n discoveryData.append(row)\n\n self.log.info('completed the ``_parse_discovery_information`` method')\n return discoveryData[0], TNSId\n",
"def _parse_photometry_data(\n self,\n content,\n TNSId):\n \"\"\"*parse photometry data from a row in the tns results content*\n\n **Key Arguments:**\n - ``content`` -- a table row from the TNS results page\n - ``TNSId`` -- the tns id of the transient\n\n **Return:**\n - ``photData`` -- a list of dictionaries of the photometry data\n - ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files \n \"\"\"\n self.log.info('starting the ``_parse_photometry_data`` method')\n\n photData = []\n relatedFilesTable = []\n\n # AT REPORT BLOCK\n ATBlock = re.search(\n r\"\"\"<tr class=[^\\n]*?AT reportings.*?(?=<tr class=[^\\n]*?Classification reportings|$)\"\"\",\n content,\n flags=re.S # re.S\n )\n\n if ATBlock:\n ATBlock = ATBlock.group()\n reports = re.finditer(\n r\"\"\"<tr class=\"row-[^\"]*\"><td class=\"cell-id\">.*?</table>\"\"\",\n ATBlock,\n flags=re.S # re.S\n )\n\n relatedFiles = self._parse_related_files(ATBlock)\n\n for r in reports:\n header = re.search(\n r\"\"\"<tr class=\"row[^\"]*\".*?time_received\">(?P<reportAddedDate>[^<]*).*?user_name\">(?P<sender>[^<]*).*?reporter_name\">(?P<reporters>[^<]*).*?source_group_name\">(?P<surveyGroup>[^<]*).*?ra\">(?P<ra>[^<]*).*?decl\">(?P<dec>[^<]*).*?discovery_date\">(?P<obsDate>[^<]*).*?flux\">(?P<mag>[^<]*).*?filter_name\">(?P<magFilter>[^<]*).*?related_files\">(?P<relatedFiles>[^<]*).*?type_name\">(?P<suggestedType>[^<]*).*?hostname\">(?P<hostName>[^<]*).*?host_redshift\">(?P<hostRedshift>[^<]*).*?internal_name\">(?P<objectName>[^<]*).*?groups\">(?P<survey>[^<]*).*?remarks\">(?P<sourceComment>[^<]*)\"\"\",\n r.group(),\n flags=0 # re.S\n )\n try:\n header = header.groupdict()\n except:\n print r.group()\n header[\"TNSId\"] = TNSId\n\n del header[\"reporters\"]\n del header[\"surveyGroup\"]\n del header[\"hostName\"]\n del header[\"hostRedshift\"]\n del header[\"mag\"]\n del header[\"magFilter\"]\n del header[\"obsDate\"]\n del header[\"ra\"]\n del header[\"dec\"]\n\n if not self.comments:\n 
del header['sourceComment']\n else:\n theseComments = header[\n \"sourceComment\"].split(\"\\n\")\n header[\"sourceComment\"] = \"\"\n for c in theseComments:\n header[\"sourceComment\"] += \" \" + c.strip()\n header[\"sourceComment\"] = header[\n \"sourceComment\"].strip().replace('\"', \"'\")[0:750]\n\n phot = re.finditer(\n r\"\"\"<tr class=\"row\\-[^\"]*\".*?obsdate\">(?P<obsdate>[^<]*).*?flux\">(?P<mag>[^<]*).*?fluxerr\">(?P<magErr>[^<]*).*?limflux\">(?P<limitingMag>[^<]*).*?unit_name\">(?P<magUnit>[^<]*).*?filter_name\">(?P<filter>[^<]*).*?tel_inst\">(?P<telescope>[^<]*).*?exptime\">(?P<exptime>[^<]*).*?observer\">(?P<observer>[^<]*).*?-remarks\">(?P<remarks>[^<]*)\"\"\",\n r.group(),\n flags=0 # re.S\n )\n filesAppended = False\n for p in phot:\n p = p.groupdict()\n del p[\"observer\"]\n\n if p[\"limitingMag\"] and not p[\"mag\"]:\n p[\"mag\"] = p[\"limitingMag\"]\n p[\"limitingMag\"] = 1\n p[\"remarks\"] = p[\"remarks\"].replace(\n \"[Last non detection]\", \"\")\n else:\n p[\"limitingMag\"] = 0\n\n if not self.comments:\n del p[\"remarks\"]\n\n p.update(header)\n\n if p[\"relatedFiles\"] and filesAppended == False:\n filesAppended = True\n for f in relatedFiles:\n # ORDER THE DICTIONARY FOR THIS ROW OF\n # RESULTS\n thisFile = collections.OrderedDict()\n thisFile[\"TNSId\"] = TNSId\n thisFile[\"filename\"] = f[\n \"filepath\"].split(\"/\")[-1]\n thisFile[\"url\"] = f[\"filepath\"]\n if self.comments:\n thisFile[\"comment\"] = f[\n \"fileComment\"].replace(\"\\n\", \" \").strip().replace('\"', \"'\")[0:750]\n thisFile[\"dateObs\"] = p[\"obsdate\"]\n thisFile[\"spec1phot2\"] = 2\n relatedFilesTable.append(thisFile)\n\n if not p[\"survey\"] and not p[\"objectName\"]:\n p[\"survey\"] = p[\"sender\"]\n\n del p[\"relatedFiles\"]\n del p[\"sender\"]\n\n # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS\n orow = collections.OrderedDict()\n keyOrder = [\"TNSId\", \"survey\", \"obsdate\", \"filter\", \"limitingMag\", \"mag\", \"magErr\",\n \"magUnit\", 
\"suggestedType\", \"telescope\", \"exptime\", \"reportAddedDate\"]\n for k, v in p.iteritems():\n if k not in keyOrder:\n keyOrder.append(k)\n for k in keyOrder:\n try:\n orow[k] = p[k]\n except:\n self.log.info(\n \"`%(k)s` not found in the source data for %(TNSId)s\" % locals())\n pass\n\n photData.append(orow)\n\n self.log.info('completed the ``_parse_photometry_data`` method')\n return photData, relatedFilesTable\n",
"def _parse_spectral_data(\n self,\n content,\n TNSId):\n \"\"\"*parse spectra data from a row in the tns results content*\n\n **Key Arguments:**\n - ``content`` -- a table row from the TNS results page\n - ``TNSId`` -- the tns id of the transient\n\n **Return:**\n - ``specData`` -- a list of dictionaries of the spectral data\n - ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files \n \"\"\"\n self.log.info('starting the ``_parse_spectral_data`` method')\n\n specData = []\n relatedFilesTable = []\n\n # CLASSIFICATION BLOCK\n classBlock = re.search(\n r\"\"\"<tr class=[^\\n]*?Classification reportings.*$\"\"\",\n content,\n flags=re.S # re.S\n )\n\n if classBlock:\n classBlock = classBlock.group()\n\n reports = re.finditer(\n r\"\"\"<tr class=\"row-[^\"]*\"><td class=\"cell-id\">.*?</tbody>\\s*</table>\\s*</div></td> </tr>\\s*</tbody>\\s*</table>\\s*</div></td> </tr>\"\"\",\n classBlock,\n flags=re.S #\n )\n\n relatedFiles = self._parse_related_files(classBlock)\n\n for r in reports:\n\n header = re.search(\n r\"\"\"<tr class=\"row.*?time_received\">(?P<reportAddedDate>[^<]*).*?user_name\">(?P<TNSuser>[^<]*).*?classifier_name\">(?P<reporters>[^<]*).*?source_group_name\">(?P<survey>[^<]*).*?-type\">(?P<specType>[^<]*).*?-redshift\">(?P<transRedshift>[^<]*).*?-related_files\">(?P<relatedFiles>[^<]*).*?-groups\">(?P<surveyGroup>[^<]*).*?-remarks\">(?P<sourceComment>[^<]*)</td>\"\"\",\n r.group(),\n flags=re.S # re.S\n )\n if not header:\n continue\n\n header = header.groupdict()\n header[\"TNSId\"] = TNSId\n\n del header[\"reporters\"]\n del header[\"surveyGroup\"]\n del header[\"survey\"]\n\n if not self.comments:\n del header['sourceComment']\n else:\n theseComments = header[\n \"sourceComment\"].split(\"\\n\")\n header[\"sourceComment\"] = \"\"\n for c in theseComments:\n header[\"sourceComment\"] += \" \" + c.strip()\n header[\"sourceComment\"] = header[\n \"sourceComment\"].strip().replace('\"', \"'\")[0:750]\n\n spec = 
re.finditer(\n r\"\"\"<tr class=\"class-results-.*?-obsdate\">(?P<obsdate>[^<]*).*?-tel_inst\">(?P<telescope>[^<]*).*?-exptime\">(?P<exptime>[^<]*).*?-observer\">(?P<sender>[^<]*).*?-reducer\">(?P<reducer>[^<]*).*?-source_group_name\">(?P<survey>[^<]*).*?-asciifile\">(.*?<a href=\"(?P<filepath>[^\"]*)\".*?</a>)?.*?-fitsfile\">(.*?<a href=\"(?P<fitsFilepath>[^\"]*)\".*?</a>)?.*?-groups\">(?P<surveyGroup>[^<]*).*?-remarks\">(?P<remarks>[^<]*)\"\"\",\n r.group(),\n flags=0 # re.S\n )\n filesAppended = False\n for s in spec:\n s = s.groupdict()\n del s[\"sender\"]\n del s[\"surveyGroup\"]\n del s[\"reducer\"]\n\n if not self.comments:\n del s[\"remarks\"]\n else:\n s[\"remarks\"] = s[\"remarks\"].replace('\"', \"'\")[0:750]\n\n s.update(header)\n\n if s[\"relatedFiles\"] and filesAppended == False:\n filesAppended = True\n for f in relatedFiles:\n # ORDER THE DICTIONARY FOR THIS ROW OF\n # RESULTS\n thisFile = collections.OrderedDict()\n thisFile[\"TNSId\"] = TNSId\n thisFile[\"filename\"] = f[\n \"filepath\"].split(\"/\")[-1]\n thisFile[\"url\"] = f[\"filepath\"]\n if self.comments:\n thisFile[\"comment\"] = f[\n \"fileComment\"].replace(\"\\n\", \" \").strip()\n thisFile[\"dateObs\"] = s[\"obsdate\"]\n thisFile[\"spec1phot2\"] = 1\n relatedFilesTable.append(thisFile)\n\n for ffile in [s[\"filepath\"], s[\"fitsFilepath\"]]:\n if ffile:\n # ORDER THE DICTIONARY FOR THIS ROW OF\n # RESULTS\n thisFile = collections.OrderedDict()\n thisFile[\"TNSId\"] = TNSId\n thisFile[\"filename\"] = ffile.split(\n \"/\")[-1]\n thisFile[\"url\"] = ffile\n if self.comments:\n thisFile[\"comment\"] = \"\"\n thisFile[\"dateObs\"] = s[\"obsdate\"]\n thisFile[\"spec1phot2\"] = 1\n relatedFilesTable.append(thisFile)\n\n del s[\"filepath\"]\n del s[\"fitsFilepath\"]\n del s[\"relatedFiles\"]\n\n # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS\n orow = collections.OrderedDict()\n keyOrder = [\"TNSId\", \"survey\", \"obsdate\", \"specType\", \"transRedshift\",\n \"telescope\", \"exptime\", 
\"reportAddedDate\", \"TNSuser\"]\n for k, v in s.iteritems():\n if k not in keyOrder:\n keyOrder.append(k)\n for k in keyOrder:\n try:\n orow[k] = s[k]\n except:\n self.log.info(\n \"`%(k)s` not found in the source data for %(TNSId)s\" % locals())\n pass\n\n specData.append(orow)\n\n self.log.info('completed the ``_parse_spectral_data`` method')\n return specData, relatedFilesTable\n"
] |
class search():
"""
*The worker class for the transient namer search module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``ra`` -- RA of the location being checked
- ``dec`` -- DEC of the location being searched
- ``radiusArcsec`` - the radius of the conesearch to perform against the TNS
- ``name`` -- name of the object to search the TNS for
- ``discInLastDays`` -- search the TNS for transient discovered in the last X days
- ``comments`` -- print the comments from the TNS, note these can be long making table outputs somewhat unreadable. Default *False*
**Usage:**
To initiate a search object to search the TNS via an object name (either TNS or survey names accepted):
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
name="Gaia16bbi"
)
or for a conesearch use something similar to:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
ra="06:50:36.74",
dec="+31:06:44.7",
radiusArcsec=5
)
Note the search method can accept coordinates in sexagesimal or decimal defree formats.
To list all new objects discovered in the last three weeks, then use:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
discInLastDays=21
)
"""
# Initialisation
def __init__(
        self,
        log,
        ra="",
        dec="",
        radiusArcsec="",
        name="",
        discInLastDays="",
        settings=False,
        comments=False
):
    """Store the search parameters, build the discovery-date window, then
    immediately query the TNS and cache the parsed results as four result
    sets (sources, photometry, spectra and related files).

    **Key Arguments:**
        - ``log`` -- logger
        - ``ra`` -- RA of the location to conesearch (sexagesimal or decimal degrees)
        - ``dec`` -- DEC of the location to conesearch
        - ``radiusArcsec`` -- the conesearch radius in arcsec
        - ``name`` -- a single object name; either a TNS name or an internal survey name
        - ``discInLastDays`` -- restrict to transients discovered in the last N days
        - ``settings`` -- the settings dictionary
        - ``comments`` -- include the (potentially long) TNS comments in the results
    """
    self.log = log
    log.debug("instansiating a new 'search' object")
    self.settings = settings
    self.ra = ra
    self.dec = dec
    self.radiusArcsec = radiusArcsec
    self.comments = comments
    self.name = name
    # internal_name IS ONLY SET IF `name` TURNS OUT TO BE A SURVEY NAME
    self.internal_name = ""
    self.discInLastDays = discInLastDays
    # TNS RESULT PAGINATION STATE: PAGE INDEX AND RESULTS PER PAGE
    self.page = 0
    self.batchSize = 1000
    # CREATE THE TIME-RANGE WINDOW TO SEARCH TNS
    # (end = tomorrow so that today's discoveries are included)
    if not discInLastDays:
        self.start = ""
        self.end = ""
    else:
        discInLastDays = int(discInLastDays)
        td = timedelta(days=1)
        end = datetime.now() + td
        self.end = end.strftime("%Y-%m-%d")
        td = timedelta(days=discInLastDays)
        start = datetime.now() - td
        self.start = start.strftime("%Y-%m-%d")
    # DETERMINE IF WE HAVE A TNS OR INTERAL SURVEY NAME
    # TNS-style names look like "SN 2016asf", "AT2016asf" or "2016asf";
    # group 3 captures the bare year+suffix identifier
    if self.name:
        matchObject = re.match(r'^((SN|AT) ?)?(\d{4}\w{1,6})', self.name)
        if matchObject:
            self.name = matchObject.group(3)
        else:
            # NOT A TNS NAME -- TREAT AS AN INTERNAL SURVEY NAME INSTEAD
            self.internal_name = self.name
            self.name = ""
    # DO THE SEARCH OF THE TNS AND COMPILE THE RESULTS INTO SEPARATE RESULT
    # SETS
    self.sourceResultsList, self.photResultsList, self.specResultsList, self.relatedFilesResultsList = self._query_tns()
    # WRAP EACH RESULT SET SO IT CAN BE RENDERED AS csv/json/yaml/markdown/table/mysql
    self.sourceResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.sourceResultsList
    )
    self.photResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.photResultsList
    )
    self.specResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.specResultsList
    )
    self.relatedFilesResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.relatedFilesResultsList
    )
    return None
@property
def sources(
self):
"""*The results of the search returned as a python list of dictionaries*
**Usage:**
.. code-block:: python
sources = tns.sources
"""
sourceResultsList = []
sourceResultsList[:] = [dict(l) for l in self.sourceResultsList]
return sourceResultsList
@property
def spectra(
self):
"""*The associated source spectral data*
**Usage:**
.. code-block:: python
sourceSpectra = tns.spectra
"""
specResultsList = []
specResultsList[:] = [dict(l) for l in self.specResultsList]
return specResultsList
@property
def files(
self):
"""*The associated source files*
**Usage:**
.. code-block:: python
sourceFiles = tns.files
"""
relatedFilesResultsList = []
relatedFilesResultsList[:] = [dict(l)
for l in self.relatedFilesResultsList]
return relatedFilesResultsList
@property
def photometry(
self):
"""*The associated source photometry*
**Usage:**
.. code-block:: python
sourcePhotometry = tns.photometry
"""
photResultsList = []
photResultsList[:] = [dict(l) for l in self.photResultsList]
return photResultsList
@property
def url(
self):
"""*The generated URL used for searching of the TNS*
**Usage:**
.. code-block:: python
searchURL = tns.url
"""
return self._searchURL
def csv(
self,
dirPath=None):
"""*Render the results in csv format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `csvSources` -- the top-level transient data
- `csvPhot` -- all photometry associated with the transients
- `csvSpec` -- all spectral data associated with the transients
- `csvFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in csv format:
.. code-block:: python
csvSources, csvPhot, csvSpec, csvFiles = tns.csv()
print csvSources
.. code-block:: text
TNSId,TNSName,discoveryName,discSurvey,raSex,decSex,raDeg,decDeg,transRedshift,specType,discMag,discMagFilter,discDate,objectUrl,hostName,hostRedshift,separationArcsec,separationNorthArcsec,separationEastArcsec
2016asf,SN2016asf,ASASSN-16cs,ASAS-SN,06:50:36.73,+31:06:45.36,102.6530,31.1126,0.021,SN Ia,17.1,V-Johnson,2016-03-06 08:09:36,http://wis-tns.weizmann.ac.il/object/2016asf,KUG 0647+311,,0.66,0.65,-0.13
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.csv("~/tns")
.. image:: https://i.imgur.com/BwwqMBg.png
:width: 800px
:alt: csv output
"""
if dirPath:
p = self._file_prefix()
csvSources = self.sourceResults.csv(
filepath=dirPath + "/" + p + "sources.csv")
csvPhot = self.photResults.csv(
filepath=dirPath + "/" + p + "phot.csv")
csvSpec = self.specResults.csv(
filepath=dirPath + "/" + p + "spec.csv")
csvFiles = self.relatedFilesResults.csv(
filepath=dirPath + "/" + p + "relatedFiles.csv")
else:
csvSources = self.sourceResults.csv()
csvPhot = self.photResults.csv()
csvSpec = self.specResults.csv()
csvFiles = self.relatedFilesResults.csv()
return csvSources, csvPhot, csvSpec, csvFiles
def json(
self,
dirPath=None):
"""*Render the results in json format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `jsonSources` -- the top-level transient data
- `jsonPhot` -- all photometry associated with the transients
- `jsonSpec` -- all spectral data associated with the transients
- `jsonFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in json format:
.. code-block:: python
jsonSources, jsonPhot, jsonSpec, jsonFiles = tns.json()
print jsonSources
.. code-block:: text
[
{
"TNSId": "2016asf",
"TNSName": "SN2016asf",
"decDeg": 31.1126,
"decSex": "+31:06:45.36",
"discDate": "2016-03-06 08:09:36",
"discMag": "17.1",
"discMagFilter": "V-Johnson",
"discSurvey": "ASAS-SN",
"discoveryName": "ASASSN-16cs",
"hostName": "KUG 0647+311",
"hostRedshift": null,
"objectUrl": "http://wis-tns.weizmann.ac.il/object/2016asf",
"raDeg": 102.65304166666667,
"raSex": "06:50:36.73",
"separationArcsec": "0.66",
"separationEastArcsec": "-0.13",
"separationNorthArcsec": "0.65",
"specType": "SN Ia",
"transRedshift": "0.021"
}
]
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.json("~/tns")
.. image:: https://i.imgur.com/wAHqARI.png
:width: 800px
:alt: json output
"""
if dirPath:
p = self._file_prefix()
jsonSources = self.sourceResults.json(
filepath=dirPath + "/" + p + "sources.json")
jsonPhot = self.photResults.json(
filepath=dirPath + "/" + p + "phot.json")
jsonSpec = self.specResults.json(
filepath=dirPath + "/" + p + "spec.json")
jsonFiles = self.relatedFilesResults.json(
filepath=dirPath + "/" + p + "relatedFiles.json")
else:
jsonSources = self.sourceResults.json()
jsonPhot = self.photResults.json()
jsonSpec = self.specResults.json()
jsonFiles = self.relatedFilesResults.json()
return jsonSources, jsonPhot, jsonSpec, jsonFiles
def yaml(
self,
dirPath=None):
"""*Render the results in yaml format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `yamlSources` -- the top-level transient data
- `yamlPhot` -- all photometry associated with the transients
- `yamlSpec` -- all spectral data associated with the transients
- `yamlFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in yaml format:
.. code-block:: python
yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml()
print yamlSources
.. code-block:: text
- TNSId: 2016asf
TNSName: SN2016asf
decDeg: 31.1126
decSex: '+31:06:45.36'
discDate: '2016-03-06 08:09:36'
discMag: '17.1'
discMagFilter: V-Johnson
discSurvey: ASAS-SN
discoveryName: ASASSN-16cs
hostName: KUG 0647+311
hostRedshift: null
objectUrl: http://wis-tns.weizmann.ac.il/object/2016asf
raDeg: 102.65304166666667
raSex: '06:50:36.73'
separationArcsec: '0.66'
separationEastArcsec: '-0.13'
separationNorthArcsec: '0.65'
specType: SN Ia
transRedshift: '0.021'
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.yaml("~/tns")
.. image:: https://i.imgur.com/ZpJIC6p.png
:width: 800px
:alt: yaml output
"""
if dirPath:
p = self._file_prefix()
yamlSources = self.sourceResults.yaml(
filepath=dirPath + "/" + p + "sources.yaml")
yamlPhot = self.photResults.yaml(
filepath=dirPath + "/" + p + "phot.yaml")
yamlSpec = self.specResults.yaml(
filepath=dirPath + "/" + p + "spec.yaml")
yamlFiles = self.relatedFilesResults.yaml(
filepath=dirPath + "/" + p + "relatedFiles.yaml")
else:
yamlSources = self.sourceResults.yaml()
yamlPhot = self.photResults.yaml()
yamlSpec = self.specResults.yaml()
yamlFiles = self.relatedFilesResults.yaml()
return yamlSources, yamlPhot, yamlSpec, yamlFiles
def markdown(
self,
dirPath=None):
"""*Render the results in markdown format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `markdownSources` -- the top-level transient data
- `markdownPhot` -- all photometry associated with the transients
- `markdownSpec` -- all spectral data associated with the transients
- `markdownFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in markdown table format:
.. code-block:: python
markdownSources, markdownPhot, markdownSpec, markdownFiles = tns.markdown()
print markdownSources
.. code-block:: text
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
|:---------|:-----------|:---------------|:------------|:-------------|:--------------|:----------|:---------|:---------------|:----------|:---------|:---------------|:---------------------|:----------------------------------------------|:--------------|:--------------|:------------------|:-----------------------|:----------------------|
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.markdown("~/tns")
.. image:: https://i.imgur.com/AYLBQoJ.png
:width: 800px
:alt: markdown output
"""
if dirPath:
p = self._file_prefix()
markdownSources = self.sourceResults.markdown(
filepath=dirPath + "/" + p + "sources.md")
markdownPhot = self.photResults.markdown(
filepath=dirPath + "/" + p + "phot.md")
markdownSpec = self.specResults.markdown(
filepath=dirPath + "/" + p + "spec.md")
markdownFiles = self.relatedFilesResults.markdown(
filepath=dirPath + "/" + p + "relatedFiles.md")
else:
markdownSources = self.sourceResults.markdown()
markdownPhot = self.photResults.markdown()
markdownSpec = self.specResults.markdown()
markdownFiles = self.relatedFilesResults.markdown()
return markdownSources, markdownPhot, markdownSpec, markdownFiles
def table(
self,
dirPath=None):
"""*Render the results as an ascii table*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `tableSources` -- the top-level transient data
- `tablePhot` -- all photometry associated with the transients
- `tableSpec` -- all spectral data associated with the transients
- `tableFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in ascii table format:
.. code-block:: python
tableSources, tablePhot, tableSpec, tableFiles = tns.table()
print tableSources
.. code-block:: text
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.table("~/tns")
.. image:: https://i.imgur.com/m09M0ho.png
:width: 800px
:alt: ascii files
"""
if dirPath:
p = self._file_prefix()
tableSources = self.sourceResults.table(
filepath=dirPath + "/" + p + "sources.ascii")
tablePhot = self.photResults.table(
filepath=dirPath + "/" + p + "phot.ascii")
tableSpec = self.specResults.table(
filepath=dirPath + "/" + p + "spec.ascii")
tableFiles = self.relatedFilesResults.table(
filepath=dirPath + "/" + p + "relatedFiles.ascii")
else:
tableSources = self.sourceResults.table()
tablePhot = self.photResults.table()
tableSpec = self.specResults.table()
tableFiles = self.relatedFilesResults.table()
return tableSources, tablePhot, tableSpec, tableFiles
    def mysql(
            self,
            tableNamePrefix="TNS",
            dirPath=None):
        """*Render the results as MySQL Insert statements*

        **Key Arguments:**
            - ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
            - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

        **Return:**
            - `mysqlSources` -- the top-level transient data
            - `mysqlPhot` -- all photometry associated with the transients
            - `mysqlSpec` -- all spectral data associated with the transients
            - `mysqlFiles` -- all files associated with the matched transients found on the tns

        **Usage:**

            To render the results in mysql insert format:

            .. code-block:: python

                mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")
                print mysqlSources

            .. code-block:: text

                INSERT INTO `TNS_sources` (TNSId,TNSName,dateCreated,decDeg,decSex,discDate,discMag,discMagFilter,discSurvey,discoveryName,hostName,hostRedshift,objectUrl,raDeg,raSex,separationArcsec,separationEastArcsec,separationNorthArcsec,specType,transRedshift) VALUES ("2016asf" ,"SN2016asf" ,"2016-09-20T11:22:13" ,"31.1126" ,"+31:06:45.36" ,"2016-03-06 08:09:36" ,"17.1" ,"V-Johnson" ,"ASAS-SN" ,"ASASSN-16cs" ,"KUG 0647+311" ,null ,"http://wis-tns.weizmann.ac.il/object/2016asf" ,"102.653041667" ,"06:50:36.73" ,"0.66" ,"-0.13" ,"0.65" ,"SN Ia" ,"0.021") ON DUPLICATE KEY UPDATE TNSId="2016asf", TNSName="SN2016asf", dateCreated="2016-09-20T11:22:13", decDeg="31.1126", decSex="+31:06:45.36", discDate="2016-03-06 08:09:36", discMag="17.1", discMagFilter="V-Johnson", discSurvey="ASAS-SN", discoveryName="ASASSN-16cs", hostName="KUG 0647+311", hostRedshift=null, objectUrl="http://wis-tns.weizmann.ac.il/object/2016asf", raDeg="102.653041667", raSex="06:50:36.73", separationArcsec="0.66", separationEastArcsec="-0.13", separationNorthArcsec="0.65", specType="SN Ia", transRedshift="0.021", updated=1, dateLastModified=NOW() ;

            You can save the results to file by passing in a directory path within which to save the files. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be associated with its transient source using the transient's unique `TNSId`.

            .. code-block:: python

                tns.mysql("TNS", "~/tns")

            .. image:: https://i.imgur.com/CozySPW.png
                :width: 800px
                :alt: mysql output
        """
        if dirPath:
            p = self._file_prefix()

            # SOURCES TABLE -- one top-level row per transient, keyed on the
            # unique `TNSId`; the create statement is written into the .sql
            # file alongside the insert statements
            createStatement = """
CREATE TABLE `%(tableNamePrefix)s_sources` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(20) NOT NULL,
  `TNSName` varchar(20) DEFAULT NULL,
  `dateCreated` datetime DEFAULT NULL,
  `decDeg` double DEFAULT NULL,
  `decSex` varchar(45) DEFAULT NULL,
  `discDate` datetime DEFAULT NULL,
  `discMag` double DEFAULT NULL,
  `discMagFilter` varchar(45) DEFAULT NULL,
  `discSurvey` varchar(100) DEFAULT NULL,
  `discoveryName` varchar(100) DEFAULT NULL,
  `objectUrl` varchar(200) DEFAULT NULL,
  `raDeg` double DEFAULT NULL,
  `raSex` varchar(45) DEFAULT NULL,
  `specType` varchar(100) DEFAULT NULL,
  `transRedshift` double DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `hostName` VARCHAR(100) NULL DEFAULT NULL,
  `hostRedshift` DOUBLE NULL DEFAULT NULL,
  `survey` VARCHAR(100) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid` (`TNSId`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()

            mysqlSources = self.sourceResults.mysql(
                tableNamePrefix + "_sources", filepath=dirPath + "/" + p + "sources.sql", createStatement=createStatement)

            # PHOTOMETRY TABLE -- many rows per transient, unique on
            # (TNSId, survey, obsdate)
            createStatement = """
CREATE TABLE `%(tableNamePrefix)s_photometry` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(20) NOT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `exptime` double DEFAULT NULL,
  `filter` varchar(100) DEFAULT NULL,
  `limitingMag` tinyint(4) DEFAULT NULL,
  `mag` double DEFAULT NULL,
  `magErr` double DEFAULT NULL,
  `magUnit` varchar(100) DEFAULT NULL,
  `objectName` varchar(100) DEFAULT NULL,
  `obsdate` datetime DEFAULT NULL,
  `reportAddedDate` datetime DEFAULT NULL,
  `suggestedType` varchar(100) DEFAULT NULL,
  `survey` varchar(100) DEFAULT NULL,
  `telescope` varchar(100) DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `remarks` VARCHAR(800) NULL DEFAULT NULL,
  `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
  UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),
  UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()

            mysqlPhot = self.photResults.mysql(
                tableNamePrefix + "_photometry", filepath=dirPath + "/" + p + "phot.sql", createStatement=createStatement)

            # SPECTRA TABLE -- many rows per transient, unique on
            # (TNSId, survey, obsdate) and (TNSId, TNSuser, obsdate)
            createStatement = """
CREATE TABLE `%(tableNamePrefix)s_spectra` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(45) NOT NULL,
  `TNSuser` varchar(45) DEFAULT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `exptime` double DEFAULT NULL,
  `obsdate` datetime DEFAULT NULL,
  `reportAddedDate` datetime DEFAULT NULL,
  `specType` varchar(100) DEFAULT NULL,
  `survey` varchar(100) DEFAULT NULL,
  `telescope` varchar(100) DEFAULT NULL,
  `transRedshift` double DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `remarks` VARCHAR(800) NULL DEFAULT NULL,
  `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
  UNIQUE KEY `u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()

            mysqlSpec = self.specResults.mysql(
                tableNamePrefix + "_spectra", filepath=dirPath + "/" + p + "spec.sql", createStatement=createStatement)

            # RELATED FILES TABLE -- one row per file associated with a
            # transient, unique on (TNSId, url)
            createStatement = """
CREATE TABLE `%(tableNamePrefix)s_files` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(100) NOT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `dateObs` datetime DEFAULT NULL,
  `filename` varchar(200) DEFAULT NULL,
  `spec1phot2` tinyint(4) DEFAULT NULL,
  `url` varchar(800) DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `comment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid_url` (`TNSId`,`url`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()

            mysqlFiles = self.relatedFilesResults.mysql(
                tableNamePrefix + "_files", filepath=dirPath + "/" + p + "relatedFiles.sql", createStatement=createStatement)
        else:
            # no directory given -- render the insert statements in memory
            # only (no create statements, nothing written to disk)
            mysqlSources = self.sourceResults.mysql(
                tableNamePrefix + "_sources")
            mysqlPhot = self.photResults.mysql(tableNamePrefix + "_photometry")
            mysqlSpec = self.specResults.mysql(tableNamePrefix + "_spectra")
            mysqlFiles = self.relatedFilesResults.mysql(
                tableNamePrefix + "_files")

        return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles
def _get_tns_search_results(
self):
"""
*query the tns and result the response*
"""
self.log.info('starting the ``_get_tns_search_results`` method')
try:
response = requests.get(
url="http://wis-tns.weizmann.ac.il/search",
params={
"page": self.page,
"ra": self.ra,
"decl": self.dec,
"radius": self.radiusArcsec,
"name": self.name,
"internal_name": self.internal_name,
"date_start[date]": self.start,
"date_end[date]": self.end,
"num_page": self.batchSize,
"display[redshift]": "1",
"display[hostname]": "1",
"display[host_redshift]": "1",
"display[source_group_name]": "1",
"display[internal_name]": "1",
"display[spectra_count]": "1",
"display[discoverymag]": "1",
"display[discmagfilter]": "1",
"display[discoverydate]": "1",
"display[discoverer]": "1",
"display[sources]": "1",
"display[bibcode]": "1",
},
)
except requests.exceptions.RequestException:
print('HTTP Request failed')
self.log.info('completed the ``_get_tns_search_results`` method')
return response.status_code, response.content, response.url
def _file_prefix(
self):
"""*Generate a file prefix based on the type of search for saving files to disk*
**Return:**
- ``prefix`` -- the file prefix
"""
self.log.info('starting the ``_file_prefix`` method')
if self.ra:
now = datetime.now()
prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_conesearch_")
elif self.name:
prefix = self.name + "_tns_conesearch_"
elif self.internal_name:
prefix = self.internal_name + "_tns_conesearch_"
elif self.discInLastDays:
discInLastDays = str(self.discInLastDays)
now = datetime.now()
prefix = now.strftime(
discInLastDays + "d_since_%Y%m%d_tns_conesearch_")
self.log.info('completed the ``_file_prefix`` method')
return prefix
def _parse_transient_rows(
self,
content,
count=False):
"""* parse transient rows from the TNS result page content*
**Key Arguments:**
- ``content`` -- the content from the TNS results page.
- ``count`` -- return only the number of rows
**Return:**
- ``transientRows``
"""
self.log.info('starting the ``_parse_transient_rows`` method')
regexForRow = r"""\n([^\n]*?<a href="/object/.*?)(?=\n[^\n]*?<a href="/object/|<\!\-\- /\.section, /#content \-\->)"""
if count:
# A SINGLE SOURCE BLOCK
matchedSources = re.findall(
regexForRow,
content,
flags=re.S # re.S
)
return len(matchedSources)
# A SINGLE SOURCE BLOCK
matchedSources = re.finditer(
regexForRow,
content,
flags=re.S # re.S
)
self.log.info('completed the ``_parse_transient_rows`` method')
return matchedSources
def _parse_discovery_information(
self,
content):
"""* parse discovery information from one row on the TNS results page*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page.
**Return:**
- ``discoveryData`` -- dictionary of results
- ``TNSId`` -- the unique TNS id for the transient
"""
self.log.info('starting the ``_parse_discovery_information`` method')
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
matches = re.finditer(
r"""<tr class="row-.*?"><td class="cell-id">(?P<tnsId>\d*?)</td><td class="cell-name"><a href="(?P<objectUrl>.*?)">(?P<TNSName>.*?)</a></td><td class="cell-.*?<td class="cell-ra">(?P<raSex>.*?)</td><td class="cell-decl">(?P<decSex>.*?)</td><td class="cell-ot_name">(?P<specType>.*?)</td><td class="cell-redshift">(?P<transRedshift>.*?)</td><td class="cell-hostname">(?P<hostName>.*?)</td><td class="cell-host_redshift">(?P<hostRedshift>.*?)</td><td class="cell-source_group_name">(?P<discSurvey>.*?)</td>.*?<td class="cell-internal_name">(<a.*?>)?(?P<discoveryName>.*?)(</a>)?</td>.*?<td class="cell-discoverymag">(?P<discMag>.*?)</td><td class="cell-disc_filter_name">(?P<discMagFilter>.*?)</td><td class="cell-discoverydate">(?P<discDate>.*?)</td><td class="cell-discoverer">(?P<sender>.*?)</td>.*?</tr>""",
content,
flags=0 # re.S
)
discoveryData = []
for match in matches:
row = match.groupdict()
for k, v in row.iteritems():
row[k] = v.strip()
if len(v) == 0:
row[k] = None
if row["transRedshift"] == 0:
row["transRedshift"] = None
if row["TNSName"][0] in ["1", "2"]:
row["TNSName"] = "SN" + row["TNSName"]
row["objectUrl"] = "http://wis-tns.weizmann.ac.il" + \
row["objectUrl"]
# CONVERT COORDINATES TO DECIMAL DEGREES
row["raDeg"] = converter.ra_sexegesimal_to_decimal(
ra=row["raSex"]
)
row["decDeg"] = converter.dec_sexegesimal_to_decimal(
dec=row["decSex"]
)
# IF THIS IS A COORDINATE SEARCH, ADD SEPARATION FROM
# ORIGINAL QUERY COORDINATES
if self.ra:
# CALCULATE SEPARATION IN ARCSEC
from astrocalc.coords import separations
calculator = separations(
log=self.log,
ra1=self.ra,
dec1=self.dec,
ra2=row["raDeg"],
dec2=row["decDeg"],
)
angularSeparation, north, east = calculator.get()
row["separationArcsec"] = angularSeparation
row["separationNorthArcsec"] = north
row["separationEastArcsec"] = east
if not row["discSurvey"]:
row["survey"] = row["sender"]
del row["sender"]
del row["tnsId"]
row["TNSName"] = row["TNSName"].replace(" ", "")
row["TNSId"] = row["TNSName"].replace(
"SN", "").replace("AT", "")
TNSId = row["TNSId"]
# ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
orow = collections.OrderedDict()
keyOrder = ["TNSId", "TNSName", "discoveryName", "discSurvey", "raSex", "decSex", "raDeg", "decDeg",
"transRedshift", "specType", "discMag", "discMagFilter", "discDate", "objectUrl", "hostName", "hostRedshift", "separationArcsec", "separationNorthArcsec", "separationEastArcsec"]
for k, v in row.iteritems():
if k not in keyOrder:
keyOrder.append(k)
for k in keyOrder:
try:
orow[k] = row[k]
except:
self.log.info(
"`%(k)s` not found in the source data for %(TNSId)s" % locals())
pass
discoveryData.append(row)
self.log.info('completed the ``_parse_discovery_information`` method')
return discoveryData[0], TNSId
def _parse_photometry_data(
self,
content,
TNSId):
"""*parse photometry data from a row in the tns results content*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page
- ``TNSId`` -- the tns id of the transient
**Return:**
- ``photData`` -- a list of dictionaries of the photometry data
- ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files
"""
self.log.info('starting the ``_parse_photometry_data`` method')
photData = []
relatedFilesTable = []
# AT REPORT BLOCK
ATBlock = re.search(
r"""<tr class=[^\n]*?AT reportings.*?(?=<tr class=[^\n]*?Classification reportings|$)""",
content,
flags=re.S # re.S
)
if ATBlock:
ATBlock = ATBlock.group()
reports = re.finditer(
r"""<tr class="row-[^"]*"><td class="cell-id">.*?</table>""",
ATBlock,
flags=re.S # re.S
)
relatedFiles = self._parse_related_files(ATBlock)
for r in reports:
header = re.search(
r"""<tr class="row[^"]*".*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<sender>[^<]*).*?reporter_name">(?P<reporters>[^<]*).*?source_group_name">(?P<surveyGroup>[^<]*).*?ra">(?P<ra>[^<]*).*?decl">(?P<dec>[^<]*).*?discovery_date">(?P<obsDate>[^<]*).*?flux">(?P<mag>[^<]*).*?filter_name">(?P<magFilter>[^<]*).*?related_files">(?P<relatedFiles>[^<]*).*?type_name">(?P<suggestedType>[^<]*).*?hostname">(?P<hostName>[^<]*).*?host_redshift">(?P<hostRedshift>[^<]*).*?internal_name">(?P<objectName>[^<]*).*?groups">(?P<survey>[^<]*).*?remarks">(?P<sourceComment>[^<]*)""",
r.group(),
flags=0 # re.S
)
try:
header = header.groupdict()
except:
print r.group()
header["TNSId"] = TNSId
del header["reporters"]
del header["surveyGroup"]
del header["hostName"]
del header["hostRedshift"]
del header["mag"]
del header["magFilter"]
del header["obsDate"]
del header["ra"]
del header["dec"]
if not self.comments:
del header['sourceComment']
else:
theseComments = header[
"sourceComment"].split("\n")
header["sourceComment"] = ""
for c in theseComments:
header["sourceComment"] += " " + c.strip()
header["sourceComment"] = header[
"sourceComment"].strip().replace('"', "'")[0:750]
phot = re.finditer(
r"""<tr class="row\-[^"]*".*?obsdate">(?P<obsdate>[^<]*).*?flux">(?P<mag>[^<]*).*?fluxerr">(?P<magErr>[^<]*).*?limflux">(?P<limitingMag>[^<]*).*?unit_name">(?P<magUnit>[^<]*).*?filter_name">(?P<filter>[^<]*).*?tel_inst">(?P<telescope>[^<]*).*?exptime">(?P<exptime>[^<]*).*?observer">(?P<observer>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
r.group(),
flags=0 # re.S
)
filesAppended = False
for p in phot:
p = p.groupdict()
del p["observer"]
if p["limitingMag"] and not p["mag"]:
p["mag"] = p["limitingMag"]
p["limitingMag"] = 1
p["remarks"] = p["remarks"].replace(
"[Last non detection]", "")
else:
p["limitingMag"] = 0
if not self.comments:
del p["remarks"]
p.update(header)
if p["relatedFiles"] and filesAppended == False:
filesAppended = True
for f in relatedFiles:
# ORDER THE DICTIONARY FOR THIS ROW OF
# RESULTS
thisFile = collections.OrderedDict()
thisFile["TNSId"] = TNSId
thisFile["filename"] = f[
"filepath"].split("/")[-1]
thisFile["url"] = f["filepath"]
if self.comments:
thisFile["comment"] = f[
"fileComment"].replace("\n", " ").strip().replace('"', "'")[0:750]
thisFile["dateObs"] = p["obsdate"]
thisFile["spec1phot2"] = 2
relatedFilesTable.append(thisFile)
if not p["survey"] and not p["objectName"]:
p["survey"] = p["sender"]
del p["relatedFiles"]
del p["sender"]
# ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
orow = collections.OrderedDict()
keyOrder = ["TNSId", "survey", "obsdate", "filter", "limitingMag", "mag", "magErr",
"magUnit", "suggestedType", "telescope", "exptime", "reportAddedDate"]
for k, v in p.iteritems():
if k not in keyOrder:
keyOrder.append(k)
for k in keyOrder:
try:
orow[k] = p[k]
except:
self.log.info(
"`%(k)s` not found in the source data for %(TNSId)s" % locals())
pass
photData.append(orow)
self.log.info('completed the ``_parse_photometry_data`` method')
return photData, relatedFilesTable
def _parse_related_files(
self,
content):
"""*parse the contents for related files URLs and comments*
**Key Arguments:**
- ``content`` -- the content to parse.
**Return:**
- ``relatedFiles`` -- a list of dictionaries of transient related files
"""
self.log.info('starting the ``_parse_related_files`` method')
relatedFilesList = re.finditer(
r"""<td class="cell-filename">.*?href="(?P<filepath>[^"]*).*?remarks">(?P<fileComment>[^<]*)""",
content,
flags=0 # re.S
)
relatedFiles = []
for f in relatedFilesList:
f = f.groupdict()
relatedFiles.append(f)
self.log.info('completed the ``_parse_related_files`` method')
return relatedFiles
def _parse_spectral_data(
self,
content,
TNSId):
"""*parse spectra data from a row in the tns results content*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page
- ``TNSId`` -- the tns id of the transient
**Return:**
- ``specData`` -- a list of dictionaries of the spectral data
- ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files
"""
self.log.info('starting the ``_parse_spectral_data`` method')
specData = []
relatedFilesTable = []
# CLASSIFICATION BLOCK
classBlock = re.search(
r"""<tr class=[^\n]*?Classification reportings.*$""",
content,
flags=re.S # re.S
)
if classBlock:
classBlock = classBlock.group()
reports = re.finditer(
r"""<tr class="row-[^"]*"><td class="cell-id">.*?</tbody>\s*</table>\s*</div></td> </tr>\s*</tbody>\s*</table>\s*</div></td> </tr>""",
classBlock,
flags=re.S #
)
relatedFiles = self._parse_related_files(classBlock)
for r in reports:
header = re.search(
r"""<tr class="row.*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<TNSuser>[^<]*).*?classifier_name">(?P<reporters>[^<]*).*?source_group_name">(?P<survey>[^<]*).*?-type">(?P<specType>[^<]*).*?-redshift">(?P<transRedshift>[^<]*).*?-related_files">(?P<relatedFiles>[^<]*).*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<sourceComment>[^<]*)</td>""",
r.group(),
flags=re.S # re.S
)
if not header:
continue
header = header.groupdict()
header["TNSId"] = TNSId
del header["reporters"]
del header["surveyGroup"]
del header["survey"]
if not self.comments:
del header['sourceComment']
else:
theseComments = header[
"sourceComment"].split("\n")
header["sourceComment"] = ""
for c in theseComments:
header["sourceComment"] += " " + c.strip()
header["sourceComment"] = header[
"sourceComment"].strip().replace('"', "'")[0:750]
spec = re.finditer(
r"""<tr class="class-results-.*?-obsdate">(?P<obsdate>[^<]*).*?-tel_inst">(?P<telescope>[^<]*).*?-exptime">(?P<exptime>[^<]*).*?-observer">(?P<sender>[^<]*).*?-reducer">(?P<reducer>[^<]*).*?-source_group_name">(?P<survey>[^<]*).*?-asciifile">(.*?<a href="(?P<filepath>[^"]*)".*?</a>)?.*?-fitsfile">(.*?<a href="(?P<fitsFilepath>[^"]*)".*?</a>)?.*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
r.group(),
flags=0 # re.S
)
filesAppended = False
for s in spec:
s = s.groupdict()
del s["sender"]
del s["surveyGroup"]
del s["reducer"]
if not self.comments:
del s["remarks"]
else:
s["remarks"] = s["remarks"].replace('"', "'")[0:750]
s.update(header)
if s["relatedFiles"] and filesAppended == False:
filesAppended = True
for f in relatedFiles:
# ORDER THE DICTIONARY FOR THIS ROW OF
# RESULTS
thisFile = collections.OrderedDict()
thisFile["TNSId"] = TNSId
thisFile["filename"] = f[
"filepath"].split("/")[-1]
thisFile["url"] = f["filepath"]
if self.comments:
thisFile["comment"] = f[
"fileComment"].replace("\n", " ").strip()
thisFile["dateObs"] = s["obsdate"]
thisFile["spec1phot2"] = 1
relatedFilesTable.append(thisFile)
for ffile in [s["filepath"], s["fitsFilepath"]]:
if ffile:
# ORDER THE DICTIONARY FOR THIS ROW OF
# RESULTS
thisFile = collections.OrderedDict()
thisFile["TNSId"] = TNSId
thisFile["filename"] = ffile.split(
"/")[-1]
thisFile["url"] = ffile
if self.comments:
thisFile["comment"] = ""
thisFile["dateObs"] = s["obsdate"]
thisFile["spec1phot2"] = 1
relatedFilesTable.append(thisFile)
del s["filepath"]
del s["fitsFilepath"]
del s["relatedFiles"]
# ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
orow = collections.OrderedDict()
keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
"telescope", "exptime", "reportAddedDate", "TNSuser"]
for k, v in s.iteritems():
if k not in keyOrder:
keyOrder.append(k)
for k in keyOrder:
try:
orow[k] = s[k]
except:
self.log.info(
"`%(k)s` not found in the source data for %(TNSId)s" % locals())
pass
specData.append(orow)
self.log.info('completed the ``_parse_spectral_data`` method')
return specData, relatedFilesTable
|
thespacedoctor/transientNamer
|
transientNamer/search.py
|
search._get_tns_search_results
|
python
|
def _get_tns_search_results(
self):
self.log.info('starting the ``_get_tns_search_results`` method')
try:
response = requests.get(
url="http://wis-tns.weizmann.ac.il/search",
params={
"page": self.page,
"ra": self.ra,
"decl": self.dec,
"radius": self.radiusArcsec,
"name": self.name,
"internal_name": self.internal_name,
"date_start[date]": self.start,
"date_end[date]": self.end,
"num_page": self.batchSize,
"display[redshift]": "1",
"display[hostname]": "1",
"display[host_redshift]": "1",
"display[source_group_name]": "1",
"display[internal_name]": "1",
"display[spectra_count]": "1",
"display[discoverymag]": "1",
"display[discmagfilter]": "1",
"display[discoverydate]": "1",
"display[discoverer]": "1",
"display[sources]": "1",
"display[bibcode]": "1",
},
)
except requests.exceptions.RequestException:
print('HTTP Request failed')
self.log.info('completed the ``_get_tns_search_results`` method')
return response.status_code, response.content, response.url
|
*query the TNS and return the response*
|
train
|
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L779-L818
| null |
class search():
"""
*The worker class for the transient namer search module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``ra`` -- RA of the location being checked
- ``dec`` -- DEC of the location being searched
- ``radiusArcsec`` - the radius of the conesearch to perform against the TNS
- ``name`` -- name of the object to search the TNS for
- ``discInLastDays`` -- search the TNS for transient discovered in the last X days
- ``comments`` -- print the comments from the TNS, note these can be long making table outputs somewhat unreadable. Default *False*
**Usage:**
To initiate a search object to search the TNS via an object name (either TNS or survey names accepted):
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
name="Gaia16bbi"
)
or for a conesearch use something similar to:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
ra="06:50:36.74",
dec="+31:06:44.7",
radiusArcsec=5
)
Note the search method can accept coordinates in sexagesimal or decimal degree formats.
To list all new objects discovered in the last three weeks, then use:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
discInLastDays=21
)
"""
    # Initialisation
    def __init__(
            self,
            log,
            ra="",
            dec="",
            radiusArcsec="",
            name="",
            discInLastDays="",
            settings=False,
            comments=False
    ):
        """Set up the search constraints, query the TNS and compile the
        result sets (sources, photometry, spectra, related files).

        All search constraints are optional -- see the class docstring for
        the meaning of each argument.
        """
        self.log = log
        log.debug("instansiating a new 'search' object")
        self.settings = settings
        self.ra = ra
        self.dec = dec
        self.radiusArcsec = radiusArcsec
        self.comments = comments
        self.name = name
        self.internal_name = ""
        self.discInLastDays = discInLastDays
        # pagination state for the TNS queries
        self.page = 0
        self.batchSize = 1000

        # CREATE THE TIME-RANGE WINDOW TO SEARCH TNS
        if not discInLastDays:
            self.start = ""
            self.end = ""
        else:
            discInLastDays = int(discInLastDays)
            td = timedelta(days=1)
            # pad the end date by one day so today's discoveries are included
            end = datetime.now() + td
            self.end = end.strftime("%Y-%m-%d")
            td = timedelta(days=discInLastDays)
            start = datetime.now() - td
            self.start = start.strftime("%Y-%m-%d")

        # DETERMINE IF WE HAVE A TNS OR INTERAL SURVEY NAME
        if self.name:
            # TNS names look like '2016asf', optionally prefixed 'SN'/'AT'
            matchObject = re.match(r'^((SN|AT) ?)?(\d{4}\w{1,6})', self.name)
            if matchObject:
                self.name = matchObject.group(3)
            else:
                # not a TNS-style name -- treat it as a survey-internal name
                self.internal_name = self.name
                self.name = ""

        # DO THE SEARCH OF THE TNS AND COMPILE THE RESULTS INTO SEPARATE RESULT
        # SETS
        self.sourceResultsList, self.photResultsList, self.specResultsList, self.relatedFilesResultsList = self._query_tns()

        # wrap each result list for the csv/json/yaml/markdown/mysql renderers
        self.sourceResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.sourceResultsList
        )
        self.photResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.photResultsList
        )
        self.specResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.specResultsList
        )
        self.relatedFilesResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.relatedFilesResultsList
        )

        return None
@property
def sources(
self):
"""*The results of the search returned as a python list of dictionaries*
**Usage:**
.. code-block:: python
sources = tns.sources
"""
sourceResultsList = []
sourceResultsList[:] = [dict(l) for l in self.sourceResultsList]
return sourceResultsList
@property
def spectra(
self):
"""*The associated source spectral data*
**Usage:**
.. code-block:: python
sourceSpectra = tns.spectra
"""
specResultsList = []
specResultsList[:] = [dict(l) for l in self.specResultsList]
return specResultsList
@property
def files(
self):
"""*The associated source files*
**Usage:**
.. code-block:: python
sourceFiles = tns.files
"""
relatedFilesResultsList = []
relatedFilesResultsList[:] = [dict(l)
for l in self.relatedFilesResultsList]
return relatedFilesResultsList
@property
def photometry(
self):
"""*The associated source photometry*
**Usage:**
.. code-block:: python
sourcePhotometry = tns.photometry
"""
photResultsList = []
photResultsList[:] = [dict(l) for l in self.photResultsList]
return photResultsList
    @property
    def url(
            self):
        """*The generated URL used for searching of the TNS*

        **Usage:**

        .. code-block:: python

            searchURL = tns.url
        """
        # `_searchURL` is populated when the TNS query is executed
        return self._searchURL
def csv(
self,
dirPath=None):
"""*Render the results in csv format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `csvSources` -- the top-level transient data
- `csvPhot` -- all photometry associated with the transients
- `csvSpec` -- all spectral data associated with the transients
- `csvFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in csv format:
.. code-block:: python
csvSources, csvPhot, csvSpec, csvFiles = tns.csv()
print csvSources
.. code-block:: text
TNSId,TNSName,discoveryName,discSurvey,raSex,decSex,raDeg,decDeg,transRedshift,specType,discMag,discMagFilter,discDate,objectUrl,hostName,hostRedshift,separationArcsec,separationNorthArcsec,separationEastArcsec
2016asf,SN2016asf,ASASSN-16cs,ASAS-SN,06:50:36.73,+31:06:45.36,102.6530,31.1126,0.021,SN Ia,17.1,V-Johnson,2016-03-06 08:09:36,http://wis-tns.weizmann.ac.il/object/2016asf,KUG 0647+311,,0.66,0.65,-0.13
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.csv("~/tns")
.. image:: https://i.imgur.com/BwwqMBg.png
:width: 800px
:alt: csv output
"""
if dirPath:
p = self._file_prefix()
csvSources = self.sourceResults.csv(
filepath=dirPath + "/" + p + "sources.csv")
csvPhot = self.photResults.csv(
filepath=dirPath + "/" + p + "phot.csv")
csvSpec = self.specResults.csv(
filepath=dirPath + "/" + p + "spec.csv")
csvFiles = self.relatedFilesResults.csv(
filepath=dirPath + "/" + p + "relatedFiles.csv")
else:
csvSources = self.sourceResults.csv()
csvPhot = self.photResults.csv()
csvSpec = self.specResults.csv()
csvFiles = self.relatedFilesResults.csv()
return csvSources, csvPhot, csvSpec, csvFiles
def json(
self,
dirPath=None):
"""*Render the results in json format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `jsonSources` -- the top-level transient data
- `jsonPhot` -- all photometry associated with the transients
- `jsonSpec` -- all spectral data associated with the transients
- `jsonFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in json format:
.. code-block:: python
jsonSources, jsonPhot, jsonSpec, jsonFiles = tns.json()
print jsonSources
.. code-block:: text
[
{
"TNSId": "2016asf",
"TNSName": "SN2016asf",
"decDeg": 31.1126,
"decSex": "+31:06:45.36",
"discDate": "2016-03-06 08:09:36",
"discMag": "17.1",
"discMagFilter": "V-Johnson",
"discSurvey": "ASAS-SN",
"discoveryName": "ASASSN-16cs",
"hostName": "KUG 0647+311",
"hostRedshift": null,
"objectUrl": "http://wis-tns.weizmann.ac.il/object/2016asf",
"raDeg": 102.65304166666667,
"raSex": "06:50:36.73",
"separationArcsec": "0.66",
"separationEastArcsec": "-0.13",
"separationNorthArcsec": "0.65",
"specType": "SN Ia",
"transRedshift": "0.021"
}
]
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.json("~/tns")
.. image:: https://i.imgur.com/wAHqARI.png
:width: 800px
:alt: json output
"""
if dirPath:
p = self._file_prefix()
jsonSources = self.sourceResults.json(
filepath=dirPath + "/" + p + "sources.json")
jsonPhot = self.photResults.json(
filepath=dirPath + "/" + p + "phot.json")
jsonSpec = self.specResults.json(
filepath=dirPath + "/" + p + "spec.json")
jsonFiles = self.relatedFilesResults.json(
filepath=dirPath + "/" + p + "relatedFiles.json")
else:
jsonSources = self.sourceResults.json()
jsonPhot = self.photResults.json()
jsonSpec = self.specResults.json()
jsonFiles = self.relatedFilesResults.json()
return jsonSources, jsonPhot, jsonSpec, jsonFiles
def yaml(
self,
dirPath=None):
"""*Render the results in yaml format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `yamlSources` -- the top-level transient data
- `yamlPhot` -- all photometry associated with the transients
- `yamlSpec` -- all spectral data associated with the transients
- `yamlFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in yaml format:
.. code-block:: python
yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml()
print yamlSources
.. code-block:: text
- TNSId: 2016asf
TNSName: SN2016asf
decDeg: 31.1126
decSex: '+31:06:45.36'
discDate: '2016-03-06 08:09:36'
discMag: '17.1'
discMagFilter: V-Johnson
discSurvey: ASAS-SN
discoveryName: ASASSN-16cs
hostName: KUG 0647+311
hostRedshift: null
objectUrl: http://wis-tns.weizmann.ac.il/object/2016asf
raDeg: 102.65304166666667
raSex: '06:50:36.73'
separationArcsec: '0.66'
separationEastArcsec: '-0.13'
separationNorthArcsec: '0.65'
specType: SN Ia
transRedshift: '0.021'
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.yaml("~/tns")
.. image:: https://i.imgur.com/ZpJIC6p.png
:width: 800px
:alt: yaml output
"""
if dirPath:
p = self._file_prefix()
yamlSources = self.sourceResults.yaml(
filepath=dirPath + "/" + p + "sources.yaml")
yamlPhot = self.photResults.yaml(
filepath=dirPath + "/" + p + "phot.yaml")
yamlSpec = self.specResults.yaml(
filepath=dirPath + "/" + p + "spec.yaml")
yamlFiles = self.relatedFilesResults.yaml(
filepath=dirPath + "/" + p + "relatedFiles.yaml")
else:
yamlSources = self.sourceResults.yaml()
yamlPhot = self.photResults.yaml()
yamlSpec = self.specResults.yaml()
yamlFiles = self.relatedFilesResults.yaml()
return yamlSources, yamlPhot, yamlSpec, yamlFiles
def markdown(
        self,
        dirPath=None):
    """*Render the four result sets as markdown tables*

    **Key Arguments:**
        - ``dirPath`` -- directory to write the rendered tables into. If *None* nothing is written to disk. Default *None*

    **Return:**
        - ``markdownSources`` -- the top-level transient data
        - ``markdownPhot`` -- all photometry associated with the transients
        - ``markdownSpec`` -- all spectral data associated with the transients
        - ``markdownFiles`` -- all files associated with the matched transients found on the tns

    **Usage:**

    .. code-block:: python

        markdownSources, markdownPhot, markdownSpec, markdownFiles = tns.markdown()

    All four flavours of data are written to separate files (prefixed via
    `_file_prefix`); rows can be associated across files using the
    transient's unique `TNSId`.
    """
    # PAIR EACH RESULT SET WITH THE FILENAME SUFFIX USED WHEN WRITING TO DISK
    resultSets = (
        (self.sourceResults, "sources.md"),
        (self.photResults, "phot.md"),
        (self.specResults, "spec.md"),
        (self.relatedFilesResults, "relatedFiles.md")
    )

    if dirPath:
        prefix = self._file_prefix()
        rendered = [r.markdown(filepath=dirPath + "/" + prefix + suffix)
                    for r, suffix in resultSets]
    else:
        rendered = [r.markdown() for r, suffix in resultSets]

    return tuple(rendered)
def table(
        self,
        dirPath=None):
    """*Render the four result sets as ascii tables*

    **Key Arguments:**
        - ``dirPath`` -- directory to write the rendered tables into. If *None* nothing is written to disk. Default *None*

    **Return:**
        - ``tableSources`` -- the top-level transient data
        - ``tablePhot`` -- all photometry associated with the transients
        - ``tableSpec`` -- all spectral data associated with the transients
        - ``tableFiles`` -- all files associated with the matched transients found on the tns

    **Usage:**

    .. code-block:: python

        tableSources, tablePhot, tableSpec, tableFiles = tns.table()

    All four flavours of data are written to separate files (prefixed via
    `_file_prefix`); rows can be associated across files using the
    transient's unique `TNSId`.
    """
    # PAIR EACH RESULT SET WITH THE FILENAME SUFFIX USED WHEN WRITING TO DISK
    resultSets = (
        (self.sourceResults, "sources.ascii"),
        (self.photResults, "phot.ascii"),
        (self.specResults, "spec.ascii"),
        (self.relatedFilesResults, "relatedFiles.ascii")
    )

    if dirPath:
        prefix = self._file_prefix()
        rendered = [r.table(filepath=dirPath + "/" + prefix + suffix)
                    for r, suffix in resultSets]
    else:
        rendered = [r.table() for r, suffix in resultSets]

    return tuple(rendered)
def mysql(
        self,
        tableNamePrefix="TNS",
        dirPath=None):
    """*Render the results as MySQL Insert statements*

    **Key Arguments:**
        - ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `mysqlSources` -- the top-level transient data
        - `mysqlPhot` -- all photometry associated with the transients
        - `mysqlSpec` -- all spectral data associated with the transients
        - `mysqlFiles` -- all files associated with the matched transients found on the tns

    **Usage:**

    To render the results in mysql insert format:

    .. code-block:: python

        mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")
        print mysqlSources

    .. code-block:: text

        INSERT INTO `TNS_sources` (TNSId,TNSName,dateCreated, ...) VALUES ("2016asf" ,"SN2016asf" ,...) ON DUPLICATE KEY UPDATE TNSId="2016asf", ..., updated=1, dateLastModified=NOW() ;

    You can save the results to file by passing in a directory path within
    which to save the files to. The four flavours of data (sources,
    photometry, spectra and files) are saved to separate files but all data
    can be assoicated with its transient source using the transient's
    unique `TNSId`.

    .. code-block:: python

        tns.mysql("TNS", "~/tns")
    """
    if dirPath:
        # WRITING TO DISK: EACH SQL FILE IS PREFIXED WITH A CREATE TABLE
        # STATEMENT SO THE INSERTS CAN BE RUN AGAINST A FRESH DATABASE.
        # `%(tableNamePrefix)s` IS SUBSTITUTED VIA `% locals()` BELOW.
        p = self._file_prefix()
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_sources` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(20) NOT NULL,
  `TNSName` varchar(20) DEFAULT NULL,
  `dateCreated` datetime DEFAULT NULL,
  `decDeg` double DEFAULT NULL,
  `decSex` varchar(45) DEFAULT NULL,
  `discDate` datetime DEFAULT NULL,
  `discMag` double DEFAULT NULL,
  `discMagFilter` varchar(45) DEFAULT NULL,
  `discSurvey` varchar(100) DEFAULT NULL,
  `discoveryName` varchar(100) DEFAULT NULL,
  `objectUrl` varchar(200) DEFAULT NULL,
  `raDeg` double DEFAULT NULL,
  `raSex` varchar(45) DEFAULT NULL,
  `specType` varchar(100) DEFAULT NULL,
  `transRedshift` double DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `hostName` VARCHAR(100) NULL DEFAULT NULL,
  `hostRedshift` DOUBLE NULL DEFAULT NULL,
  `survey` VARCHAR(100) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid` (`TNSId`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources", filepath=dirPath + "/" + p + "sources.sql", createStatement=createStatement)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_photometry` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(20) NOT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `exptime` double DEFAULT NULL,
  `filter` varchar(100) DEFAULT NULL,
  `limitingMag` tinyint(4) DEFAULT NULL,
  `mag` double DEFAULT NULL,
  `magErr` double DEFAULT NULL,
  `magUnit` varchar(100) DEFAULT NULL,
  `objectName` varchar(100) DEFAULT NULL,
  `obsdate` datetime DEFAULT NULL,
  `reportAddedDate` datetime DEFAULT NULL,
  `suggestedType` varchar(100) DEFAULT NULL,
  `survey` varchar(100) DEFAULT NULL,
  `telescope` varchar(100) DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `remarks` VARCHAR(800) NULL DEFAULT NULL,
  `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
  UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),
  UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlPhot = self.photResults.mysql(
            tableNamePrefix + "_photometry", filepath=dirPath + "/" + p + "phot.sql", createStatement=createStatement)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_spectra` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(45) NOT NULL,
  `TNSuser` varchar(45) DEFAULT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `exptime` double DEFAULT NULL,
  `obsdate` datetime DEFAULT NULL,
  `reportAddedDate` datetime DEFAULT NULL,
  `specType` varchar(100) DEFAULT NULL,
  `survey` varchar(100) DEFAULT NULL,
  `telescope` varchar(100) DEFAULT NULL,
  `transRedshift` double DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `remarks` VARCHAR(800) NULL DEFAULT NULL,
  `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
  UNIQUE KEY `u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlSpec = self.specResults.mysql(
            tableNamePrefix + "_spectra", filepath=dirPath + "/" + p + "spec.sql", createStatement=createStatement)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_files` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(100) NOT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `dateObs` datetime DEFAULT NULL,
  `filename` varchar(200) DEFAULT NULL,
  `spec1phot2` tinyint(4) DEFAULT NULL,
  `url` varchar(800) DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `comment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid_url` (`TNSId`,`url`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files", filepath=dirPath + "/" + p + "relatedFiles.sql", createStatement=createStatement)
    else:
        # NO DIRECTORY GIVEN - RENDER THE INSERT STATEMENTS IN MEMORY ONLY
        # (NO CREATE TABLE STATEMENTS ARE GENERATED IN THIS CASE)
        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources")
        mysqlPhot = self.photResults.mysql(tableNamePrefix + "_photometry")
        mysqlSpec = self.specResults.mysql(tableNamePrefix + "_spectra")
        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files")

    return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles
def _query_tns(self):
"""
*determine how to query the TNS, send query and parse the results*
**Return:**
- ``results`` -- a list of dictionaries (one dictionary for each result set returned from the TNS)
"""
self.log.info('starting the ``get`` method')
sourceTable = []
photoTable = []
specTable = []
relatedFilesTable = []
# THIS stop IS TO KEEP TRACK OF THE TNS PAGINATION IF MANY RESULT PAGES
# ARE RETURNED
stop = False
sourceCount = 0
while not stop:
status_code, content, self._searchURL = self._get_tns_search_results()
if status_code != 200:
self.log.error(
'cound not get the search reuslts from the TNS, HTML error code %(status_code)s ' % locals())
return None
if "No results found" in content:
print "No results found"
return sourceTable, photoTable, specTable, relatedFilesTable
if self._parse_transient_rows(content, True) < self.batchSize:
stop = True
else:
self.page += 1
thisPage = self.page
print "Downloaded %(thisPage)s page(s) from the TNS. %(sourceCount)s transients parsed so far." % locals()
sourceCount += self.batchSize
print "\t" + self._searchURL
timesleep.sleep(1)
# PARSE ALL ROWS RETURNED
for transientRow in self._parse_transient_rows(content):
# TOP LEVEL DISCOVERY CONTENT
sourceContent = transientRow.group()
discInfo, TNSId = self._parse_discovery_information(
sourceContent)
sourceTable.append(discInfo)
# PHOTOMETERY
phot, relatedFiles = self._parse_photometry_data(
sourceContent, TNSId)
photoTable += phot
relatedFilesTable += relatedFiles
# SPECTRA
spec, relatedFiles = self._parse_spectral_data(
sourceContent, TNSId)
specTable += spec
relatedFilesTable += relatedFiles
# SORT BY SEPARATION FROM THE SEARCH COORDINATES
try:
sourceTable = sorted(sourceTable, key=itemgetter(
'separationArcsec'), reverse=False)
except:
pass
self.log.info('completed the ``get`` method')
return sourceTable, photoTable, specTable, relatedFilesTable
def _file_prefix(
self):
"""*Generate a file prefix based on the type of search for saving files to disk*
**Return:**
- ``prefix`` -- the file prefix
"""
self.log.info('starting the ``_file_prefix`` method')
if self.ra:
now = datetime.now()
prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_conesearch_")
elif self.name:
prefix = self.name + "_tns_conesearch_"
elif self.internal_name:
prefix = self.internal_name + "_tns_conesearch_"
elif self.discInLastDays:
discInLastDays = str(self.discInLastDays)
now = datetime.now()
prefix = now.strftime(
discInLastDays + "d_since_%Y%m%d_tns_conesearch_")
self.log.info('completed the ``_file_prefix`` method')
return prefix
def _parse_transient_rows(
self,
content,
count=False):
"""* parse transient rows from the TNS result page content*
**Key Arguments:**
- ``content`` -- the content from the TNS results page.
- ``count`` -- return only the number of rows
**Return:**
- ``transientRows``
"""
self.log.info('starting the ``_parse_transient_rows`` method')
regexForRow = r"""\n([^\n]*?<a href="/object/.*?)(?=\n[^\n]*?<a href="/object/|<\!\-\- /\.section, /#content \-\->)"""
if count:
# A SINGLE SOURCE BLOCK
matchedSources = re.findall(
regexForRow,
content,
flags=re.S # re.S
)
return len(matchedSources)
# A SINGLE SOURCE BLOCK
matchedSources = re.finditer(
regexForRow,
content,
flags=re.S # re.S
)
self.log.info('completed the ``_parse_transient_rows`` method')
return matchedSources
def _parse_discovery_information(
        self,
        content):
    """* parse discovery information from one row on the TNS results page*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page.

    **Return:**
        - ``discoveryData`` -- ordered dictionary of results for the first matched row
        - ``TNSId`` -- the unique TNS id for the transient

    NOTE(review): if the regex matches nothing, ``discoveryData[0]`` raises
    IndexError and ``TNSId`` is unbound -- callers appear to only pass
    content known to contain a row; confirm before reuse.
    """
    self.log.info('starting the ``_parse_discovery_information`` method')

    # ASTROCALC UNIT CONVERTER OBJECT
    converter = unit_conversion(
        log=self.log
    )

    matches = re.finditer(
        r"""<tr class="row-.*?"><td class="cell-id">(?P<tnsId>\d*?)</td><td class="cell-name"><a href="(?P<objectUrl>.*?)">(?P<TNSName>.*?)</a></td><td class="cell-.*?<td class="cell-ra">(?P<raSex>.*?)</td><td class="cell-decl">(?P<decSex>.*?)</td><td class="cell-ot_name">(?P<specType>.*?)</td><td class="cell-redshift">(?P<transRedshift>.*?)</td><td class="cell-hostname">(?P<hostName>.*?)</td><td class="cell-host_redshift">(?P<hostRedshift>.*?)</td><td class="cell-source_group_name">(?P<discSurvey>.*?)</td>.*?<td class="cell-internal_name">(<a.*?>)?(?P<discoveryName>.*?)(</a>)?</td>.*?<td class="cell-discoverymag">(?P<discMag>.*?)</td><td class="cell-disc_filter_name">(?P<discMagFilter>.*?)</td><td class="cell-discoverydate">(?P<discDate>.*?)</td><td class="cell-discoverer">(?P<sender>.*?)</td>.*?</tr>""",
        content,
        flags=0  # re.S
    )

    discoveryData = []
    for match in matches:
        row = match.groupdict()

        # STRIP WHITESPACE; NULLIFY EMPTY CELLS
        for k, v in row.iteritems():
            row[k] = v.strip()
            if len(v) == 0:
                row[k] = None
        # NOTE(review): values are strings here, so this comparison against
        # the integer 0 can never be true -- presumably `== "0"` was
        # intended. Left unchanged to preserve behaviour.
        if row["transRedshift"] == 0:
            row["transRedshift"] = None

        # NAMES STARTING WITH A YEAR (1xxx/2xxx) ARE SUPERNOVA DESIGNATIONS
        if row["TNSName"][0] in ["1", "2"]:
            row["TNSName"] = "SN" + row["TNSName"]
        row["objectUrl"] = "http://wis-tns.weizmann.ac.il" + \
            row["objectUrl"]

        # CONVERT COORDINATES TO DECIMAL DEGREES
        row["raDeg"] = converter.ra_sexegesimal_to_decimal(
            ra=row["raSex"]
        )
        row["decDeg"] = converter.dec_sexegesimal_to_decimal(
            dec=row["decSex"]
        )

        # IF THIS IS A COORDINATE SEARCH, ADD SEPARATION FROM
        # ORIGINAL QUERY COORDINATES
        if self.ra:
            # CALCULATE SEPARATION IN ARCSEC
            from astrocalc.coords import separations
            calculator = separations(
                log=self.log,
                ra1=self.ra,
                dec1=self.dec,
                ra2=row["raDeg"],
                dec2=row["decDeg"],
            )
            angularSeparation, north, east = calculator.get()
            row["separationArcsec"] = angularSeparation
            row["separationNorthArcsec"] = north
            row["separationEastArcsec"] = east

        # FALL BACK TO THE SENDER WHEN NO DISCOVERY SURVEY IS GIVEN
        if not row["discSurvey"]:
            row["survey"] = row["sender"]
        del row["sender"]
        del row["tnsId"]

        row["TNSName"] = row["TNSName"].replace(" ", "")
        row["TNSId"] = row["TNSName"].replace(
            "SN", "").replace("AT", "")
        TNSId = row["TNSId"]

        # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
        orow = collections.OrderedDict()
        keyOrder = ["TNSId", "TNSName", "discoveryName", "discSurvey", "raSex", "decSex", "raDeg", "decDeg",
                    "transRedshift", "specType", "discMag", "discMagFilter", "discDate", "objectUrl", "hostName", "hostRedshift", "separationArcsec", "separationNorthArcsec", "separationEastArcsec"]
        for k, v in row.iteritems():
            if k not in keyOrder:
                keyOrder.append(k)
        for k in keyOrder:
            try:
                orow[k] = row[k]
            except:
                self.log.info(
                    "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                pass
        # BUG FIX: APPEND THE ORDERED ROW - PREVIOUSLY THE UNORDERED `row`
        # WAS APPENDED AND THE ORDERING WORK ABOVE WAS DISCARDED
        discoveryData.append(orow)

    self.log.info('completed the ``_parse_discovery_information`` method')
    return discoveryData[0], TNSId
def _parse_photometry_data(
        self,
        content,
        TNSId):
    """*parse photometry data from a row in the tns results content*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page
        - ``TNSId`` -- the tns id of the transient

    **Return:**
        - ``photData`` -- a list of dictionaries of the photometry data
        - ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files
    """
    self.log.info('starting the ``_parse_photometry_data`` method')

    photData = []
    relatedFilesTable = []

    # AT REPORT BLOCK - EVERYTHING UP TO THE CLASSIFICATION SECTION
    # (OR THE END OF THE CONTENT)
    ATBlock = re.search(
        r"""<tr class=[^\n]*?AT reportings.*?(?=<tr class=[^\n]*?Classification reportings|$)""",
        content,
        flags=re.S  # re.S
    )

    if ATBlock:
        ATBlock = ATBlock.group()
        # ONE MATCH PER AT REPORT (HEADER + PHOTOMETRY TABLE)
        reports = re.finditer(
            r"""<tr class="row-[^"]*"><td class="cell-id">.*?</table>""",
            ATBlock,
            flags=re.S  # re.S
        )
        relatedFiles = self._parse_related_files(ATBlock)

        for r in reports:
            # REPORT-LEVEL METADATA SHARED BY ALL PHOTOMETRY POINTS BELOW
            header = re.search(
                r"""<tr class="row[^"]*".*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<sender>[^<]*).*?reporter_name">(?P<reporters>[^<]*).*?source_group_name">(?P<surveyGroup>[^<]*).*?ra">(?P<ra>[^<]*).*?decl">(?P<dec>[^<]*).*?discovery_date">(?P<obsDate>[^<]*).*?flux">(?P<mag>[^<]*).*?filter_name">(?P<magFilter>[^<]*).*?related_files">(?P<relatedFiles>[^<]*).*?type_name">(?P<suggestedType>[^<]*).*?hostname">(?P<hostName>[^<]*).*?host_redshift">(?P<hostRedshift>[^<]*).*?internal_name">(?P<objectName>[^<]*).*?groups">(?P<survey>[^<]*).*?remarks">(?P<sourceComment>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )
            try:
                header = header.groupdict()
            except:
                # NOTE(review): if the header regex fails to match, `header`
                # is None; this prints the offending report for debugging
                # but the code below will then raise on `header["TNSId"]` --
                # confirm whether a `continue` was intended here
                print r.group()
            header["TNSId"] = TNSId
            # DROP FIELDS DUPLICATED AT THE SOURCE LEVEL OR NOT REQUIRED
            del header["reporters"]
            del header["surveyGroup"]
            del header["hostName"]
            del header["hostRedshift"]
            del header["mag"]
            del header["magFilter"]
            del header["obsDate"]
            del header["ra"]
            del header["dec"]

            if not self.comments:
                del header['sourceComment']
            else:
                # FLATTEN MULTI-LINE COMMENTS TO A SINGLE LINE, SWAP DOUBLE
                # QUOTES FOR SINGLE AND TRUNCATE TO 750 CHARACTERS
                theseComments = header[
                    "sourceComment"].split("\n")
                header["sourceComment"] = ""
                for c in theseComments:
                    header["sourceComment"] += " " + c.strip()
                header["sourceComment"] = header[
                    "sourceComment"].strip().replace('"', "'")[0:750]

            # INDIVIDUAL PHOTOMETRY POINTS WITHIN THIS REPORT
            phot = re.finditer(
                r"""<tr class="row\-[^"]*".*?obsdate">(?P<obsdate>[^<]*).*?flux">(?P<mag>[^<]*).*?fluxerr">(?P<magErr>[^<]*).*?limflux">(?P<limitingMag>[^<]*).*?unit_name">(?P<magUnit>[^<]*).*?filter_name">(?P<filter>[^<]*).*?tel_inst">(?P<telescope>[^<]*).*?exptime">(?P<exptime>[^<]*).*?observer">(?P<observer>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )

            # RELATED FILES ARE APPENDED ONLY ONCE PER REPORT
            filesAppended = False
            for p in phot:
                p = p.groupdict()
                del p["observer"]
                # A LIMITING-MAGNITUDE POINT (NON-DETECTION) IS FLAGGED WITH
                # limitingMag=1 AND ITS LIMIT STORED IN THE mag FIELD
                if p["limitingMag"] and not p["mag"]:
                    p["mag"] = p["limitingMag"]
                    p["limitingMag"] = 1
                    p["remarks"] = p["remarks"].replace(
                        "[Last non detection]", "")
                else:
                    p["limitingMag"] = 0

                if not self.comments:
                    del p["remarks"]
                # MERGE THE REPORT-LEVEL METADATA INTO THIS POINT
                p.update(header)

                if p["relatedFiles"] and filesAppended == False:
                    filesAppended = True
                    for f in relatedFiles:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = f[
                            "filepath"].split("/")[-1]
                        thisFile["url"] = f["filepath"]
                        if self.comments:
                            thisFile["comment"] = f[
                                "fileComment"].replace("\n", " ").strip().replace('"', "'")[0:750]
                        thisFile["dateObs"] = p["obsdate"]
                        # 2 FLAGS A PHOTOMETRY-ASSOCIATED FILE
                        thisFile["spec1phot2"] = 2
                        relatedFilesTable.append(thisFile)

                # FALL BACK TO THE SENDER WHEN NO SURVEY/OBJECT NAME GIVEN
                if not p["survey"] and not p["objectName"]:
                    p["survey"] = p["sender"]
                del p["relatedFiles"]
                del p["sender"]

                # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                orow = collections.OrderedDict()
                keyOrder = ["TNSId", "survey", "obsdate", "filter", "limitingMag", "mag", "magErr",
                            "magUnit", "suggestedType", "telescope", "exptime", "reportAddedDate"]
                for k, v in p.iteritems():
                    if k not in keyOrder:
                        keyOrder.append(k)
                for k in keyOrder:
                    try:
                        orow[k] = p[k]
                    except:
                        self.log.info(
                            "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                        pass
                photData.append(orow)

    self.log.info('completed the ``_parse_photometry_data`` method')
    return photData, relatedFilesTable
def _parse_related_files(
self,
content):
"""*parse the contents for related files URLs and comments*
**Key Arguments:**
- ``content`` -- the content to parse.
**Return:**
- ``relatedFiles`` -- a list of dictionaries of transient related files
"""
self.log.info('starting the ``_parse_related_files`` method')
relatedFilesList = re.finditer(
r"""<td class="cell-filename">.*?href="(?P<filepath>[^"]*).*?remarks">(?P<fileComment>[^<]*)""",
content,
flags=0 # re.S
)
relatedFiles = []
for f in relatedFilesList:
f = f.groupdict()
relatedFiles.append(f)
self.log.info('completed the ``_parse_related_files`` method')
return relatedFiles
def _parse_spectral_data(
        self,
        content,
        TNSId):
    """*parse spectra data from a row in the tns results content*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page
        - ``TNSId`` -- the tns id of the transient

    **Return:**
        - ``specData`` -- a list of dictionaries of the spectral data
        - ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files
    """
    self.log.info('starting the ``_parse_spectral_data`` method')

    specData = []
    relatedFilesTable = []

    # CLASSIFICATION BLOCK - EVERYTHING FROM THE CLASSIFICATION SECTION
    # TO THE END OF THE CONTENT
    classBlock = re.search(
        r"""<tr class=[^\n]*?Classification reportings.*$""",
        content,
        flags=re.S  # re.S
    )

    if classBlock:
        classBlock = classBlock.group()
        # ONE MATCH PER CLASSIFICATION REPORT (HEADER + SPECTRA TABLE)
        reports = re.finditer(
            r"""<tr class="row-[^"]*"><td class="cell-id">.*?</tbody>\s*</table>\s*</div></td> </tr>\s*</tbody>\s*</table>\s*</div></td> </tr>""",
            classBlock,
            flags=re.S  #
        )
        relatedFiles = self._parse_related_files(classBlock)

        for r in reports:
            # REPORT-LEVEL METADATA SHARED BY ALL SPECTRA BELOW
            header = re.search(
                r"""<tr class="row.*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<TNSuser>[^<]*).*?classifier_name">(?P<reporters>[^<]*).*?source_group_name">(?P<survey>[^<]*).*?-type">(?P<specType>[^<]*).*?-redshift">(?P<transRedshift>[^<]*).*?-related_files">(?P<relatedFiles>[^<]*).*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<sourceComment>[^<]*)</td>""",
                r.group(),
                flags=re.S  # re.S
            )
            # SKIP REPORTS WHOSE HEADER CANNOT BE PARSED
            if not header:
                continue
            header = header.groupdict()
            header["TNSId"] = TNSId
            del header["reporters"]
            del header["surveyGroup"]
            del header["survey"]

            if not self.comments:
                del header['sourceComment']
            else:
                # FLATTEN MULTI-LINE COMMENTS TO A SINGLE LINE, SWAP DOUBLE
                # QUOTES FOR SINGLE AND TRUNCATE TO 750 CHARACTERS
                theseComments = header[
                    "sourceComment"].split("\n")
                header["sourceComment"] = ""
                for c in theseComments:
                    header["sourceComment"] += " " + c.strip()
                header["sourceComment"] = header[
                    "sourceComment"].strip().replace('"', "'")[0:750]

            # INDIVIDUAL SPECTRA WITHIN THIS REPORT (ASCII AND FITS FILE
            # LINKS ARE OPTIONAL)
            spec = re.finditer(
                r"""<tr class="class-results-.*?-obsdate">(?P<obsdate>[^<]*).*?-tel_inst">(?P<telescope>[^<]*).*?-exptime">(?P<exptime>[^<]*).*?-observer">(?P<sender>[^<]*).*?-reducer">(?P<reducer>[^<]*).*?-source_group_name">(?P<survey>[^<]*).*?-asciifile">(.*?<a href="(?P<filepath>[^"]*)".*?</a>)?.*?-fitsfile">(.*?<a href="(?P<fitsFilepath>[^"]*)".*?</a>)?.*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )

            # RELATED FILES ARE APPENDED ONLY ONCE PER REPORT
            filesAppended = False
            for s in spec:
                s = s.groupdict()
                del s["sender"]
                del s["surveyGroup"]
                del s["reducer"]
                if not self.comments:
                    del s["remarks"]
                else:
                    s["remarks"] = s["remarks"].replace('"', "'")[0:750]
                # MERGE THE REPORT-LEVEL METADATA INTO THIS SPECTRUM
                s.update(header)

                if s["relatedFiles"] and filesAppended == False:
                    filesAppended = True
                    for f in relatedFiles:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = f[
                            "filepath"].split("/")[-1]
                        thisFile["url"] = f["filepath"]
                        if self.comments:
                            thisFile["comment"] = f[
                                "fileComment"].replace("\n", " ").strip()
                        thisFile["dateObs"] = s["obsdate"]
                        # 1 FLAGS A SPECTRUM-ASSOCIATED FILE
                        thisFile["spec1phot2"] = 1
                        relatedFilesTable.append(thisFile)

                # ALSO RECORD THE ASCII/FITS SPECTRUM FILES THEMSELVES
                for ffile in [s["filepath"], s["fitsFilepath"]]:
                    if ffile:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = ffile.split(
                            "/")[-1]
                        thisFile["url"] = ffile
                        if self.comments:
                            thisFile["comment"] = ""
                        thisFile["dateObs"] = s["obsdate"]
                        thisFile["spec1phot2"] = 1
                        relatedFilesTable.append(thisFile)
                del s["filepath"]
                del s["fitsFilepath"]
                del s["relatedFiles"]

                # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                orow = collections.OrderedDict()
                keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
                            "telescope", "exptime", "reportAddedDate", "TNSuser"]
                for k, v in s.iteritems():
                    if k not in keyOrder:
                        keyOrder.append(k)
                for k in keyOrder:
                    try:
                        orow[k] = s[k]
                    except:
                        self.log.info(
                            "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                        pass
                specData.append(orow)

    self.log.info('completed the ``_parse_spectral_data`` method')
    return specData, relatedFilesTable
|
thespacedoctor/transientNamer
|
transientNamer/search.py
|
search._file_prefix
|
python
|
def _file_prefix(
self):
self.log.info('starting the ``_file_prefix`` method')
if self.ra:
now = datetime.now()
prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_conesearch_")
elif self.name:
prefix = self.name + "_tns_conesearch_"
elif self.internal_name:
prefix = self.internal_name + "_tns_conesearch_"
elif self.discInLastDays:
discInLastDays = str(self.discInLastDays)
now = datetime.now()
prefix = now.strftime(
discInLastDays + "d_since_%Y%m%d_tns_conesearch_")
self.log.info('completed the ``_file_prefix`` method')
return prefix
|
*Generate a file prefix based on the type of search for saving files to disk*
**Return:**
- ``prefix`` -- the file prefix
|
train
|
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L820-L843
| null |
class search():
"""
*The worker class for the transient namer search module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``ra`` -- RA of the location being checked
- ``dec`` -- DEC of the location being searched
- ``radiusArcsec`` - the radius of the conesearch to perform against the TNS
- ``name`` -- name of the object to search the TNS for
- ``discInLastDays`` -- search the TNS for transient discovered in the last X days
- ``comments`` -- print the comments from the TNS, note these can be long making table outputs somewhat unreadable. Default *False*
**Usage:**
To initiate a search object to search the TNS via an object name (either TNS or survey names accepted):
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
name="Gaia16bbi"
)
or for a conesearch use something similar to:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
ra="06:50:36.74",
dec="+31:06:44.7",
radiusArcsec=5
)
Note the search method can accept coordinates in sexagesimal or decimal defree formats.
To list all new objects discovered in the last three weeks, then use:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
discInLastDays=21
)
"""
# Initialisation
def __init__(
        self,
        log,
        ra="",
        dec="",
        radiusArcsec="",
        name="",
        discInLastDays="",
        settings=False,
        comments=False
):
    """*Instantiate the search object and immediately query the TNS*

    The constructor determines the search mode (conesearch, name search or
    discovered-in-last-days search), queries the TNS and parses the
    results into the four result-set attributes (`sourceResults`,
    `photResults`, `specResults`, `relatedFilesResults`).

    NOTE(review): construction performs network I/O via `_query_tns`.
    """
    self.log = log
    log.debug("instansiating a new 'search' object")
    self.settings = settings
    self.ra = ra
    self.dec = dec
    self.radiusArcsec = radiusArcsec
    self.comments = comments
    self.name = name
    self.internal_name = ""
    self.discInLastDays = discInLastDays
    # TNS RESULTS PAGINATION STATE - `batchSize` ROWS REQUESTED PER PAGE
    self.page = 0
    self.batchSize = 1000

    # CREATE THE TIME-RANGE WINDOW TO SEARCH TNS
    if not discInLastDays:
        self.start = ""
        self.end = ""
    else:
        # WINDOW RUNS FROM `discInLastDays` AGO UP TO TOMORROW (TO BE
        # INCLUSIVE OF TODAY'S DISCOVERIES)
        discInLastDays = int(discInLastDays)
        td = timedelta(days=1)
        end = datetime.now() + td
        self.end = end.strftime("%Y-%m-%d")
        td = timedelta(days=discInLastDays)
        start = datetime.now() - td
        self.start = start.strftime("%Y-%m-%d")

    # DETERMINE IF WE HAVE A TNS OR INTERAL SURVEY NAME
    # (TNS NAMES LOOK LIKE `2016asf`, OPTIONALLY PREFIXED WITH SN/AT)
    if self.name:
        matchObject = re.match(r'^((SN|AT) ?)?(\d{4}\w{1,6})', self.name)
        if matchObject:
            self.name = matchObject.group(3)
        else:
            # NOT A TNS DESIGNATION - TREAT AS A SURVEY-INTERNAL NAME
            self.internal_name = self.name
            self.name = ""

    # DO THE SEARCH OF THE TNS AND COMPILE THE RESULTS INTO SEPARATE RESULT
    # SETS
    self.sourceResultsList, self.photResultsList, self.specResultsList, self.relatedFilesResultsList = self._query_tns()

    # WRAP EACH RAW LIST IN A `list_of_dictionaries` OBJECT TO PROVIDE THE
    # VARIOUS RENDERING METHODS (csv, markdown, table, mysql, yaml ...)
    self.sourceResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.sourceResultsList
    )
    self.photResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.photResultsList
    )
    self.specResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.specResultsList
    )
    self.relatedFilesResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.relatedFilesResultsList
    )

    return None
@property
def sources(
self):
"""*The results of the search returned as a python list of dictionaries*
**Usage:**
.. code-block:: python
sources = tns.sources
"""
sourceResultsList = []
sourceResultsList[:] = [dict(l) for l in self.sourceResultsList]
return sourceResultsList
@property
def spectra(
self):
"""*The associated source spectral data*
**Usage:**
.. code-block:: python
sourceSpectra = tns.spectra
"""
specResultsList = []
specResultsList[:] = [dict(l) for l in self.specResultsList]
return specResultsList
@property
def files(
self):
"""*The associated source files*
**Usage:**
.. code-block:: python
sourceFiles = tns.files
"""
relatedFilesResultsList = []
relatedFilesResultsList[:] = [dict(l)
for l in self.relatedFilesResultsList]
return relatedFilesResultsList
@property
def photometry(
self):
"""*The associated source photometry*
**Usage:**
.. code-block:: python
sourcePhotometry = tns.photometry
"""
photResultsList = []
photResultsList[:] = [dict(l) for l in self.photResultsList]
return photResultsList
@property
def url(
self):
"""*The generated URL used for searching of the TNS*
**Usage:**
.. code-block:: python
searchURL = tns.url
"""
return self._searchURL
def csv(
self,
dirPath=None):
"""*Render the results in csv format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `csvSources` -- the top-level transient data
- `csvPhot` -- all photometry associated with the transients
- `csvSpec` -- all spectral data associated with the transients
- `csvFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in csv format:
.. code-block:: python
csvSources, csvPhot, csvSpec, csvFiles = tns.csv()
print csvSources
.. code-block:: text
TNSId,TNSName,discoveryName,discSurvey,raSex,decSex,raDeg,decDeg,transRedshift,specType,discMag,discMagFilter,discDate,objectUrl,hostName,hostRedshift,separationArcsec,separationNorthArcsec,separationEastArcsec
2016asf,SN2016asf,ASASSN-16cs,ASAS-SN,06:50:36.73,+31:06:45.36,102.6530,31.1126,0.021,SN Ia,17.1,V-Johnson,2016-03-06 08:09:36,http://wis-tns.weizmann.ac.il/object/2016asf,KUG 0647+311,,0.66,0.65,-0.13
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.csv("~/tns")
.. image:: https://i.imgur.com/BwwqMBg.png
:width: 800px
:alt: csv output
"""
if dirPath:
p = self._file_prefix()
csvSources = self.sourceResults.csv(
filepath=dirPath + "/" + p + "sources.csv")
csvPhot = self.photResults.csv(
filepath=dirPath + "/" + p + "phot.csv")
csvSpec = self.specResults.csv(
filepath=dirPath + "/" + p + "spec.csv")
csvFiles = self.relatedFilesResults.csv(
filepath=dirPath + "/" + p + "relatedFiles.csv")
else:
csvSources = self.sourceResults.csv()
csvPhot = self.photResults.csv()
csvSpec = self.specResults.csv()
csvFiles = self.relatedFilesResults.csv()
return csvSources, csvPhot, csvSpec, csvFiles
def json(
self,
dirPath=None):
"""*Render the results in json format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `jsonSources` -- the top-level transient data
- `jsonPhot` -- all photometry associated with the transients
- `jsonSpec` -- all spectral data associated with the transients
- `jsonFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in json format:
.. code-block:: python
jsonSources, jsonPhot, jsonSpec, jsonFiles = tns.json()
print jsonSources
.. code-block:: text
[
{
"TNSId": "2016asf",
"TNSName": "SN2016asf",
"decDeg": 31.1126,
"decSex": "+31:06:45.36",
"discDate": "2016-03-06 08:09:36",
"discMag": "17.1",
"discMagFilter": "V-Johnson",
"discSurvey": "ASAS-SN",
"discoveryName": "ASASSN-16cs",
"hostName": "KUG 0647+311",
"hostRedshift": null,
"objectUrl": "http://wis-tns.weizmann.ac.il/object/2016asf",
"raDeg": 102.65304166666667,
"raSex": "06:50:36.73",
"separationArcsec": "0.66",
"separationEastArcsec": "-0.13",
"separationNorthArcsec": "0.65",
"specType": "SN Ia",
"transRedshift": "0.021"
}
]
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.json("~/tns")
.. image:: https://i.imgur.com/wAHqARI.png
:width: 800px
:alt: json output
"""
if dirPath:
p = self._file_prefix()
jsonSources = self.sourceResults.json(
filepath=dirPath + "/" + p + "sources.json")
jsonPhot = self.photResults.json(
filepath=dirPath + "/" + p + "phot.json")
jsonSpec = self.specResults.json(
filepath=dirPath + "/" + p + "spec.json")
jsonFiles = self.relatedFilesResults.json(
filepath=dirPath + "/" + p + "relatedFiles.json")
else:
jsonSources = self.sourceResults.json()
jsonPhot = self.photResults.json()
jsonSpec = self.specResults.json()
jsonFiles = self.relatedFilesResults.json()
return jsonSources, jsonPhot, jsonSpec, jsonFiles
def yaml(
self,
dirPath=None):
"""*Render the results in yaml format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `yamlSources` -- the top-level transient data
- `yamlPhot` -- all photometry associated with the transients
- `yamlSpec` -- all spectral data associated with the transients
- `yamlFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in yaml format:
.. code-block:: python
yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml()
print yamlSources
.. code-block:: text
- TNSId: 2016asf
TNSName: SN2016asf
decDeg: 31.1126
decSex: '+31:06:45.36'
discDate: '2016-03-06 08:09:36'
discMag: '17.1'
discMagFilter: V-Johnson
discSurvey: ASAS-SN
discoveryName: ASASSN-16cs
hostName: KUG 0647+311
hostRedshift: null
objectUrl: http://wis-tns.weizmann.ac.il/object/2016asf
raDeg: 102.65304166666667
raSex: '06:50:36.73'
separationArcsec: '0.66'
separationEastArcsec: '-0.13'
separationNorthArcsec: '0.65'
specType: SN Ia
transRedshift: '0.021'
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.yaml("~/tns")
.. image:: https://i.imgur.com/ZpJIC6p.png
:width: 800px
:alt: yaml output
"""
if dirPath:
p = self._file_prefix()
yamlSources = self.sourceResults.yaml(
filepath=dirPath + "/" + p + "sources.yaml")
yamlPhot = self.photResults.yaml(
filepath=dirPath + "/" + p + "phot.yaml")
yamlSpec = self.specResults.yaml(
filepath=dirPath + "/" + p + "spec.yaml")
yamlFiles = self.relatedFilesResults.yaml(
filepath=dirPath + "/" + p + "relatedFiles.yaml")
else:
yamlSources = self.sourceResults.yaml()
yamlPhot = self.photResults.yaml()
yamlSpec = self.specResults.yaml()
yamlFiles = self.relatedFilesResults.yaml()
return yamlSources, yamlPhot, yamlSpec, yamlFiles
def markdown(
self,
dirPath=None):
"""*Render the results in markdown format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `markdownSources` -- the top-level transient data
- `markdownPhot` -- all photometry associated with the transients
- `markdownSpec` -- all spectral data associated with the transients
- `markdownFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in markdown table format:
.. code-block:: python
markdownSources, markdownPhot, markdownSpec, markdownFiles = tns.markdown()
print markdownSources
.. code-block:: text
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
|:---------|:-----------|:---------------|:------------|:-------------|:--------------|:----------|:---------|:---------------|:----------|:---------|:---------------|:---------------------|:----------------------------------------------|:--------------|:--------------|:------------------|:-----------------------|:----------------------|
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.markdown("~/tns")
.. image:: https://i.imgur.com/AYLBQoJ.png
:width: 800px
:alt: markdown output
"""
if dirPath:
p = self._file_prefix()
markdownSources = self.sourceResults.markdown(
filepath=dirPath + "/" + p + "sources.md")
markdownPhot = self.photResults.markdown(
filepath=dirPath + "/" + p + "phot.md")
markdownSpec = self.specResults.markdown(
filepath=dirPath + "/" + p + "spec.md")
markdownFiles = self.relatedFilesResults.markdown(
filepath=dirPath + "/" + p + "relatedFiles.md")
else:
markdownSources = self.sourceResults.markdown()
markdownPhot = self.photResults.markdown()
markdownSpec = self.specResults.markdown()
markdownFiles = self.relatedFilesResults.markdown()
return markdownSources, markdownPhot, markdownSpec, markdownFiles
def table(
self,
dirPath=None):
"""*Render the results as an ascii table*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `tableSources` -- the top-level transient data
- `tablePhot` -- all photometry associated with the transients
- `tableSpec` -- all spectral data associated with the transients
- `tableFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in ascii table format:
.. code-block:: python
tableSources, tablePhot, tableSpec, tableFiles = tns.table()
print tableSources
.. code-block:: text
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.table("~/tns")
.. image:: https://i.imgur.com/m09M0ho.png
:width: 800px
:alt: ascii files
"""
if dirPath:
p = self._file_prefix()
tableSources = self.sourceResults.table(
filepath=dirPath + "/" + p + "sources.ascii")
tablePhot = self.photResults.table(
filepath=dirPath + "/" + p + "phot.ascii")
tableSpec = self.specResults.table(
filepath=dirPath + "/" + p + "spec.ascii")
tableFiles = self.relatedFilesResults.table(
filepath=dirPath + "/" + p + "relatedFiles.ascii")
else:
tableSources = self.sourceResults.table()
tablePhot = self.photResults.table()
tableSpec = self.specResults.table()
tableFiles = self.relatedFilesResults.table()
return tableSources, tablePhot, tableSpec, tableFiles
def mysql(
        self,
        tableNamePrefix="TNS",
        dirPath=None):
    """*Render the results as MySQL Insert statements*

    **Key Arguments:**
        - ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - ``mysqlSources`` -- the top-level transient data as INSERT statements
        - ``mysqlPhot`` -- all photometry associated with the transients as INSERT statements
        - ``mysqlSpec`` -- all spectral data associated with the transients as INSERT statements
        - ``mysqlFiles`` -- all files associated with the matched transients as INSERT statements

    **Usage:**

        .. code-block:: python

            mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")

        Pass a directory path to also write the four flavours of data
        (sources, photometry, spectra, files) to separate ``.sql`` files,
        each prefixed with a matching CREATE TABLE statement; rows across
        the tables are linked by the transient's unique `TNSId`.

        .. code-block:: python

            tns.mysql("TNS", "~/tns")
    """
    if dirPath:
        # WRITING TO FILE: EACH RESULT SET GETS A CREATE TABLE STATEMENT
        # PREPENDED SO THE SQL FILES CAN BE SOURCED INTO AN EMPTY DATABASE
        p = self._file_prefix()

        # SCHEMA FOR THE TOP-LEVEL SOURCES TABLE (ONE ROW PER TRANSIENT,
        # KEYED ON TNSId)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_sources` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(20) NOT NULL,
  `TNSName` varchar(20) DEFAULT NULL,
  `dateCreated` datetime DEFAULT NULL,
  `decDeg` double DEFAULT NULL,
  `decSex` varchar(45) DEFAULT NULL,
  `discDate` datetime DEFAULT NULL,
  `discMag` double DEFAULT NULL,
  `discMagFilter` varchar(45) DEFAULT NULL,
  `discSurvey` varchar(100) DEFAULT NULL,
  `discoveryName` varchar(100) DEFAULT NULL,
  `objectUrl` varchar(200) DEFAULT NULL,
  `raDeg` double DEFAULT NULL,
  `raSex` varchar(45) DEFAULT NULL,
  `specType` varchar(100) DEFAULT NULL,
  `transRedshift` double DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `hostName` VARCHAR(100) NULL DEFAULT NULL,
  `hostRedshift` DOUBLE NULL DEFAULT NULL,
  `survey` VARCHAR(100) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid` (`TNSId`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()

        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources", filepath=dirPath + "/" + p + "sources.sql", createStatement=createStatement)

        # SCHEMA FOR THE PHOTOMETRY TABLE (MANY ROWS PER TRANSIENT, LINKED
        # BACK TO SOURCES VIA TNSId)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_photometry` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(20) NOT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `exptime` double DEFAULT NULL,
  `filter` varchar(100) DEFAULT NULL,
  `limitingMag` tinyint(4) DEFAULT NULL,
  `mag` double DEFAULT NULL,
  `magErr` double DEFAULT NULL,
  `magUnit` varchar(100) DEFAULT NULL,
  `objectName` varchar(100) DEFAULT NULL,
  `obsdate` datetime DEFAULT NULL,
  `reportAddedDate` datetime DEFAULT NULL,
  `suggestedType` varchar(100) DEFAULT NULL,
  `survey` varchar(100) DEFAULT NULL,
  `telescope` varchar(100) DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `remarks` VARCHAR(800) NULL DEFAULT NULL,
  `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
  UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),
  UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()

        mysqlPhot = self.photResults.mysql(
            tableNamePrefix + "_photometry", filepath=dirPath + "/" + p + "phot.sql", createStatement=createStatement)

        # SCHEMA FOR THE SPECTRA TABLE (MANY ROWS PER TRANSIENT, LINKED
        # BACK TO SOURCES VIA TNSId)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_spectra` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(45) NOT NULL,
  `TNSuser` varchar(45) DEFAULT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `exptime` double DEFAULT NULL,
  `obsdate` datetime DEFAULT NULL,
  `reportAddedDate` datetime DEFAULT NULL,
  `specType` varchar(100) DEFAULT NULL,
  `survey` varchar(100) DEFAULT NULL,
  `telescope` varchar(100) DEFAULT NULL,
  `transRedshift` double DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `remarks` VARCHAR(800) NULL DEFAULT NULL,
  `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
  UNIQUE KEY `u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()

        mysqlSpec = self.specResults.mysql(
            tableNamePrefix + "_spectra", filepath=dirPath + "/" + p + "spec.sql", createStatement=createStatement)

        # SCHEMA FOR THE RELATED-FILES TABLE (MANY ROWS PER TRANSIENT,
        # LINKED BACK TO SOURCES VIA TNSId)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_files` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(100) NOT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `dateObs` datetime DEFAULT NULL,
  `filename` varchar(200) DEFAULT NULL,
  `spec1phot2` tinyint(4) DEFAULT NULL,
  `url` varchar(800) DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `comment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid_url` (`TNSId`,`url`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()

        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files", filepath=dirPath + "/" + p + "relatedFiles.sql", createStatement=createStatement)
    else:
        # NO FILE OUTPUT REQUESTED - JUST RENDER THE INSERT STATEMENTS
        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources")
        mysqlPhot = self.photResults.mysql(tableNamePrefix + "_photometry")
        mysqlSpec = self.specResults.mysql(tableNamePrefix + "_spectra")
        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files")

    return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles
def _query_tns(self):
    """
    *query the TNS (paginating through result pages as required) and parse the results*

    **Return:**
        - ``sourceTable`` -- top-level discovery data; one dictionary per transient
        - ``photoTable`` -- all parsed photometry rows
        - ``specTable`` -- all parsed spectral rows
        - ``relatedFilesTable`` -- all parsed related-file rows

    Always returns the four lists (possibly empty or partial) so the
    caller's 4-tuple unpacking never fails.
    """
    self.log.info('starting the ``get`` method')

    sourceTable = []
    photoTable = []
    specTable = []
    relatedFilesTable = []

    # THIS stop IS TO KEEP TRACK OF THE TNS PAGINATION IF MANY RESULT PAGES
    # ARE RETURNED
    stop = False
    sourceCount = 0
    while not stop:
        status_code, content, self._searchURL = self._get_tns_search_results()
        if status_code != 200:
            self.log.error(
                'cound not get the search reuslts from the TNS, HTML error code %(status_code)s ' % locals())
            # BUGFIX: PREVIOUSLY RETURNED None, WHICH MADE THE CALLER'S
            # 4-TUPLE UNPACKING RAISE A TypeError. RETURN WHATEVER HAS BEEN
            # PARSED SO FAR INSTEAD.
            return sourceTable, photoTable, specTable, relatedFilesTable

        if "No results found" in content:
            print("No results found")
            return sourceTable, photoTable, specTable, relatedFilesTable

        # A FULL PAGE OF RESULTS MEANS THERE MAY BE MORE PAGES TO FETCH
        if self._parse_transient_rows(content, True) < self.batchSize:
            stop = True
        else:
            self.page += 1
            thisPage = self.page
            print("Downloaded %(thisPage)s page(s) from the TNS. %(sourceCount)s transients parsed so far." % locals())
            sourceCount += self.batchSize
            print("\t" + self._searchURL)
            # BE POLITE TO THE TNS SERVER BETWEEN PAGE REQUESTS
            timesleep.sleep(1)

        # PARSE ALL ROWS RETURNED
        for transientRow in self._parse_transient_rows(content):

            # TOP LEVEL DISCOVERY CONTENT
            sourceContent = transientRow.group()
            discInfo, TNSId = self._parse_discovery_information(
                sourceContent)
            sourceTable.append(discInfo)

            # PHOTOMETERY
            phot, relatedFiles = self._parse_photometry_data(
                sourceContent, TNSId)
            photoTable += phot
            relatedFilesTable += relatedFiles

            # SPECTRA
            spec, relatedFiles = self._parse_spectral_data(
                sourceContent, TNSId)
            specTable += spec
            relatedFilesTable += relatedFiles

    # SORT BY SEPARATION FROM THE SEARCH COORDINATES (ONLY POSSIBLE FOR
    # CONESEARCHES - NAME SEARCHES HAVE NO separationArcsec KEY)
    try:
        sourceTable = sorted(sourceTable, key=itemgetter(
            'separationArcsec'), reverse=False)
    except Exception:
        pass

    self.log.info('completed the ``get`` method')
    return sourceTable, photoTable, specTable, relatedFilesTable
def _get_tns_search_results(
        self):
    """
    *query the TNS and return the response*

    **Return:**
        - ``status_code`` -- the HTTP status code of the response
        - ``content`` -- the raw content of the results page
        - ``url`` -- the full URL that was queried

    **Raises:**
        - ``requests.exceptions.RequestException`` -- if the HTTP request itself fails
    """
    self.log.info('starting the ``_get_tns_search_results`` method')

    try:
        response = requests.get(
            url="http://wis-tns.weizmann.ac.il/search",
            params={
                "page": self.page,
                "ra": self.ra,
                "decl": self.dec,
                "radius": self.radiusArcsec,
                "name": self.name,
                "internal_name": self.internal_name,
                "date_start[date]": self.start,
                "date_end[date]": self.end,
                "num_page": self.batchSize,
                "display[redshift]": "1",
                "display[hostname]": "1",
                "display[host_redshift]": "1",
                "display[source_group_name]": "1",
                "display[internal_name]": "1",
                "display[spectra_count]": "1",
                "display[discoverymag]": "1",
                "display[discmagfilter]": "1",
                "display[discoverydate]": "1",
                "display[discoverer]": "1",
                "display[sources]": "1",
                "display[bibcode]": "1",
            },
        )
    except requests.exceptions.RequestException:
        # BUGFIX: THE EXCEPTION WAS PREVIOUSLY SWALLOWED, LEADING TO A
        # CONFUSING NameError ON `response` BELOW. LOG AND PROPAGATE THE
        # ORIGINAL ERROR INSTEAD.
        self.log.error('HTTP Request failed')
        raise

    self.log.info('completed the ``_get_tns_search_results`` method')
    return response.status_code, response.content, response.url
def _parse_transient_rows(
self,
content,
count=False):
"""* parse transient rows from the TNS result page content*
**Key Arguments:**
- ``content`` -- the content from the TNS results page.
- ``count`` -- return only the number of rows
**Return:**
- ``transientRows``
"""
self.log.info('starting the ``_parse_transient_rows`` method')
regexForRow = r"""\n([^\n]*?<a href="/object/.*?)(?=\n[^\n]*?<a href="/object/|<\!\-\- /\.section, /#content \-\->)"""
if count:
# A SINGLE SOURCE BLOCK
matchedSources = re.findall(
regexForRow,
content,
flags=re.S # re.S
)
return len(matchedSources)
# A SINGLE SOURCE BLOCK
matchedSources = re.finditer(
regexForRow,
content,
flags=re.S # re.S
)
self.log.info('completed the ``_parse_transient_rows`` method')
return matchedSources
def _parse_discovery_information(
        self,
        content):
    """* parse discovery information from one row on the TNS results page*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page.

    **Return:**
        - ``discoveryData`` -- an ordered dictionary of results for the row
        - ``TNSId`` -- the unique TNS id for the transient
    """
    self.log.info('starting the ``_parse_discovery_information`` method')

    # ASTROCALC UNIT CONVERTER OBJECT
    converter = unit_conversion(
        log=self.log
    )

    matches = re.finditer(
        r"""<tr class="row-.*?"><td class="cell-id">(?P<tnsId>\d*?)</td><td class="cell-name"><a href="(?P<objectUrl>.*?)">(?P<TNSName>.*?)</a></td><td class="cell-.*?<td class="cell-ra">(?P<raSex>.*?)</td><td class="cell-decl">(?P<decSex>.*?)</td><td class="cell-ot_name">(?P<specType>.*?)</td><td class="cell-redshift">(?P<transRedshift>.*?)</td><td class="cell-hostname">(?P<hostName>.*?)</td><td class="cell-host_redshift">(?P<hostRedshift>.*?)</td><td class="cell-source_group_name">(?P<discSurvey>.*?)</td>.*?<td class="cell-internal_name">(<a.*?>)?(?P<discoveryName>.*?)(</a>)?</td>.*?<td class="cell-discoverymag">(?P<discMag>.*?)</td><td class="cell-disc_filter_name">(?P<discMagFilter>.*?)</td><td class="cell-discoverydate">(?P<discDate>.*?)</td><td class="cell-discoverer">(?P<sender>.*?)</td>.*?</tr>""",
        content,
        flags=0  # re.S
    )

    discoveryData = []
    for match in matches:
        row = match.groupdict()
        # TIDY CELL VALUES - EMPTY STRINGS BECOME None
        for k, v in row.items():
            row[k] = v.strip()
            if len(v) == 0:
                row[k] = None
        # A ZERO REDSHIFT FROM THE TNS MEANS 'UNKNOWN' - NULL IT OUT.
        # BUGFIX: VALUES ARE STRINGS AT THIS POINT SO THE ORIGINAL `== 0`
        # TEST COULD NEVER MATCH; ALSO TEST THE STRING "0".
        if row["transRedshift"] in (0, "0"):
            row["transRedshift"] = None
        # TNS NAMES BEGINNING WITH A YEAR GET THE SN PREFIX
        if row["TNSName"][0] in ["1", "2"]:
            row["TNSName"] = "SN" + row["TNSName"]
        row["objectUrl"] = "http://wis-tns.weizmann.ac.il" + \
            row["objectUrl"]

        # CONVERT COORDINATES TO DECIMAL DEGREES
        row["raDeg"] = converter.ra_sexegesimal_to_decimal(
            ra=row["raSex"]
        )
        row["decDeg"] = converter.dec_sexegesimal_to_decimal(
            dec=row["decSex"]
        )

        # IF THIS IS A COORDINATE SEARCH, ADD SEPARATION FROM
        # ORIGINAL QUERY COORDINATES
        if self.ra:
            # CALCULATE SEPARATION IN ARCSEC
            from astrocalc.coords import separations
            calculator = separations(
                log=self.log,
                ra1=self.ra,
                dec1=self.dec,
                ra2=row["raDeg"],
                dec2=row["decDeg"],
            )
            angularSeparation, north, east = calculator.get()

            row["separationArcsec"] = angularSeparation
            row["separationNorthArcsec"] = north
            row["separationEastArcsec"] = east

        # FALL BACK TO THE SENDER WHEN NO DISCOVERY SURVEY IS GIVEN
        if not row["discSurvey"]:
            row["survey"] = row["sender"]
        del row["sender"]
        del row["tnsId"]
        row["TNSName"] = row["TNSName"].replace(" ", "")
        row["TNSId"] = row["TNSName"].replace(
            "SN", "").replace("AT", "")
        TNSId = row["TNSId"]

        # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
        orow = collections.OrderedDict()
        keyOrder = ["TNSId", "TNSName", "discoveryName", "discSurvey", "raSex", "decSex", "raDeg", "decDeg",
                    "transRedshift", "specType", "discMag", "discMagFilter", "discDate", "objectUrl", "hostName", "hostRedshift", "separationArcsec", "separationNorthArcsec", "separationEastArcsec"]
        for k, v in row.items():
            if k not in keyOrder:
                keyOrder.append(k)
        for k in keyOrder:
            try:
                orow[k] = row[k]
            except Exception:
                self.log.info(
                    "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                pass
        # BUGFIX: APPEND THE ORDERED ROW - PREVIOUSLY THE UNORDERED `row`
        # WAS APPENDED AND `orow` WAS BUILT THEN DISCARDED (THE SIBLING
        # PARSERS ALL APPEND THEIR ORDERED ROWS)
        discoveryData.append(orow)

    self.log.info('completed the ``_parse_discovery_information`` method')
    return discoveryData[0], TNSId
def _parse_photometry_data(
self,
content,
TNSId):
"""*parse photometry data from a row in the tns results content*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page
- ``TNSId`` -- the tns id of the transient
**Return:**
- ``photData`` -- a list of dictionaries of the photometry data
- ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files
"""
self.log.info('starting the ``_parse_photometry_data`` method')
photData = []
relatedFilesTable = []
# AT REPORT BLOCK
ATBlock = re.search(
r"""<tr class=[^\n]*?AT reportings.*?(?=<tr class=[^\n]*?Classification reportings|$)""",
content,
flags=re.S # re.S
)
if ATBlock:
ATBlock = ATBlock.group()
reports = re.finditer(
r"""<tr class="row-[^"]*"><td class="cell-id">.*?</table>""",
ATBlock,
flags=re.S # re.S
)
relatedFiles = self._parse_related_files(ATBlock)
for r in reports:
header = re.search(
r"""<tr class="row[^"]*".*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<sender>[^<]*).*?reporter_name">(?P<reporters>[^<]*).*?source_group_name">(?P<surveyGroup>[^<]*).*?ra">(?P<ra>[^<]*).*?decl">(?P<dec>[^<]*).*?discovery_date">(?P<obsDate>[^<]*).*?flux">(?P<mag>[^<]*).*?filter_name">(?P<magFilter>[^<]*).*?related_files">(?P<relatedFiles>[^<]*).*?type_name">(?P<suggestedType>[^<]*).*?hostname">(?P<hostName>[^<]*).*?host_redshift">(?P<hostRedshift>[^<]*).*?internal_name">(?P<objectName>[^<]*).*?groups">(?P<survey>[^<]*).*?remarks">(?P<sourceComment>[^<]*)""",
r.group(),
flags=0 # re.S
)
try:
header = header.groupdict()
except:
print r.group()
header["TNSId"] = TNSId
del header["reporters"]
del header["surveyGroup"]
del header["hostName"]
del header["hostRedshift"]
del header["mag"]
del header["magFilter"]
del header["obsDate"]
del header["ra"]
del header["dec"]
if not self.comments:
del header['sourceComment']
else:
theseComments = header[
"sourceComment"].split("\n")
header["sourceComment"] = ""
for c in theseComments:
header["sourceComment"] += " " + c.strip()
header["sourceComment"] = header[
"sourceComment"].strip().replace('"', "'")[0:750]
phot = re.finditer(
r"""<tr class="row\-[^"]*".*?obsdate">(?P<obsdate>[^<]*).*?flux">(?P<mag>[^<]*).*?fluxerr">(?P<magErr>[^<]*).*?limflux">(?P<limitingMag>[^<]*).*?unit_name">(?P<magUnit>[^<]*).*?filter_name">(?P<filter>[^<]*).*?tel_inst">(?P<telescope>[^<]*).*?exptime">(?P<exptime>[^<]*).*?observer">(?P<observer>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
r.group(),
flags=0 # re.S
)
filesAppended = False
for p in phot:
p = p.groupdict()
del p["observer"]
if p["limitingMag"] and not p["mag"]:
p["mag"] = p["limitingMag"]
p["limitingMag"] = 1
p["remarks"] = p["remarks"].replace(
"[Last non detection]", "")
else:
p["limitingMag"] = 0
if not self.comments:
del p["remarks"]
p.update(header)
if p["relatedFiles"] and filesAppended == False:
filesAppended = True
for f in relatedFiles:
# ORDER THE DICTIONARY FOR THIS ROW OF
# RESULTS
thisFile = collections.OrderedDict()
thisFile["TNSId"] = TNSId
thisFile["filename"] = f[
"filepath"].split("/")[-1]
thisFile["url"] = f["filepath"]
if self.comments:
thisFile["comment"] = f[
"fileComment"].replace("\n", " ").strip().replace('"', "'")[0:750]
thisFile["dateObs"] = p["obsdate"]
thisFile["spec1phot2"] = 2
relatedFilesTable.append(thisFile)
if not p["survey"] and not p["objectName"]:
p["survey"] = p["sender"]
del p["relatedFiles"]
del p["sender"]
# ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
orow = collections.OrderedDict()
keyOrder = ["TNSId", "survey", "obsdate", "filter", "limitingMag", "mag", "magErr",
"magUnit", "suggestedType", "telescope", "exptime", "reportAddedDate"]
for k, v in p.iteritems():
if k not in keyOrder:
keyOrder.append(k)
for k in keyOrder:
try:
orow[k] = p[k]
except:
self.log.info(
"`%(k)s` not found in the source data for %(TNSId)s" % locals())
pass
photData.append(orow)
self.log.info('completed the ``_parse_photometry_data`` method')
return photData, relatedFilesTable
def _parse_related_files(
self,
content):
"""*parse the contents for related files URLs and comments*
**Key Arguments:**
- ``content`` -- the content to parse.
**Return:**
- ``relatedFiles`` -- a list of dictionaries of transient related files
"""
self.log.info('starting the ``_parse_related_files`` method')
relatedFilesList = re.finditer(
r"""<td class="cell-filename">.*?href="(?P<filepath>[^"]*).*?remarks">(?P<fileComment>[^<]*)""",
content,
flags=0 # re.S
)
relatedFiles = []
for f in relatedFilesList:
f = f.groupdict()
relatedFiles.append(f)
self.log.info('completed the ``_parse_related_files`` method')
return relatedFiles
    def _parse_spectral_data(
            self,
            content,
            TNSId):
        """*parse spectra data from a row in the tns results content*

        **Key Arguments:**
            - ``content`` -- a table row from the TNS results page
            - ``TNSId`` -- the tns id of the transient

        **Return:**
            - ``specData`` -- a list of dictionaries of the spectral data
            - ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files
        """
        self.log.info('starting the ``_parse_spectral_data`` method')
        specData = []
        relatedFilesTable = []
        # CLASSIFICATION BLOCK
        # the spectra live inside the "Classification reportings" sub-table of
        # the row; grab everything from that marker to the end of the row
        classBlock = re.search(
            r"""<tr class=[^\n]*?Classification reportings.*$""",
            content,
            flags=re.S  # re.S
        )
        if classBlock:
            classBlock = classBlock.group()
            # ONE MATCH PER CLASSIFICATION REPORT (EACH REPORT IS A NESTED
            # TABLE AND CAN CONTAIN SEVERAL SPECTRA)
            reports = re.finditer(
                r"""<tr class="row-[^"]*"><td class="cell-id">.*?</tbody>\s*</table>\s*</div></td> </tr>\s*</tbody>\s*</table>\s*</div></td> </tr>""",
                classBlock,
                flags=re.S  #
            )
            relatedFiles = self._parse_related_files(classBlock)
            for r in reports:
                # REPORT-LEVEL METADATA SHARED BY EVERY SPECTRUM IN THE REPORT
                header = re.search(
                    r"""<tr class="row.*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<TNSuser>[^<]*).*?classifier_name">(?P<reporters>[^<]*).*?source_group_name">(?P<survey>[^<]*).*?-type">(?P<specType>[^<]*).*?-redshift">(?P<transRedshift>[^<]*).*?-related_files">(?P<relatedFiles>[^<]*).*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<sourceComment>[^<]*)</td>""",
                    r.group(),
                    flags=re.S  # re.S
                )
                if not header:
                    continue
                header = header.groupdict()
                header["TNSId"] = TNSId
                # DROP REPORT FIELDS WE NEVER EXPOSE
                del header["reporters"]
                del header["surveyGroup"]
                del header["survey"]
                if not self.comments:
                    del header['sourceComment']
                else:
                    # FLATTEN THE MULTI-LINE COMMENT, SWAP DOUBLE FOR SINGLE
                    # QUOTES (SQL-friendly) AND TRUNCATE TO 750 CHARACTERS
                    theseComments = header[
                        "sourceComment"].split("\n")
                    header["sourceComment"] = ""
                    for c in theseComments:
                        header["sourceComment"] += " " + c.strip()
                    header["sourceComment"] = header[
                        "sourceComment"].strip().replace('"', "'")[0:750]
                # ONE MATCH PER INDIVIDUAL SPECTRUM ROW WITHIN THE REPORT
                spec = re.finditer(
                    r"""<tr class="class-results-.*?-obsdate">(?P<obsdate>[^<]*).*?-tel_inst">(?P<telescope>[^<]*).*?-exptime">(?P<exptime>[^<]*).*?-observer">(?P<sender>[^<]*).*?-reducer">(?P<reducer>[^<]*).*?-source_group_name">(?P<survey>[^<]*).*?-asciifile">(.*?<a href="(?P<filepath>[^"]*)".*?</a>)?.*?-fitsfile">(.*?<a href="(?P<fitsFilepath>[^"]*)".*?</a>)?.*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                    r.group(),
                    flags=0  # re.S
                )
                # GUARD SO THE REPORT-LEVEL RELATED FILES ARE ONLY APPENDED
                # ONCE, NOT ONCE PER SPECTRUM
                filesAppended = False
                for s in spec:
                    s = s.groupdict()
                    del s["sender"]
                    del s["surveyGroup"]
                    del s["reducer"]
                    if not self.comments:
                        del s["remarks"]
                    else:
                        s["remarks"] = s["remarks"].replace('"', "'")[0:750]
                    s.update(header)
                    if s["relatedFiles"] and filesAppended == False:
                        filesAppended = True
                        for f in relatedFiles:
                            # ORDER THE DICTIONARY FOR THIS ROW OF
                            # RESULTS
                            thisFile = collections.OrderedDict()
                            thisFile["TNSId"] = TNSId
                            thisFile["filename"] = f[
                                "filepath"].split("/")[-1]
                            thisFile["url"] = f["filepath"]
                            if self.comments:
                                thisFile["comment"] = f[
                                    "fileComment"].replace("\n", " ").strip()
                            thisFile["dateObs"] = s["obsdate"]
                            # spec1phot2 == 1 flags a spectrum-related file
                            thisFile["spec1phot2"] = 1
                            relatedFilesTable.append(thisFile)
                    # THE ASCII AND FITS VERSIONS OF THE SPECTRUM ITSELF ALSO
                    # GO INTO THE RELATED FILES TABLE
                    for ffile in [s["filepath"], s["fitsFilepath"]]:
                        if ffile:
                            # ORDER THE DICTIONARY FOR THIS ROW OF
                            # RESULTS
                            thisFile = collections.OrderedDict()
                            thisFile["TNSId"] = TNSId
                            thisFile["filename"] = ffile.split(
                                "/")[-1]
                            thisFile["url"] = ffile
                            if self.comments:
                                thisFile["comment"] = ""
                            thisFile["dateObs"] = s["obsdate"]
                            thisFile["spec1phot2"] = 1
                            relatedFilesTable.append(thisFile)
                    del s["filepath"]
                    del s["fitsFilepath"]
                    del s["relatedFiles"]
                    # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                    orow = collections.OrderedDict()
                    keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
                                "telescope", "exptime", "reportAddedDate", "TNSuser"]
                    for k, v in s.iteritems():
                        if k not in keyOrder:
                            keyOrder.append(k)
                    for k in keyOrder:
                        try:
                            orow[k] = s[k]
                        except:
                            self.log.info(
                                "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                            pass
                    specData.append(orow)
        self.log.info('completed the ``_parse_spectral_data`` method')
        return specData, relatedFilesTable
|
thespacedoctor/transientNamer
|
transientNamer/search.py
|
search._parse_transient_rows
|
python
|
    def _parse_transient_rows(
            self,
            content,
            count=False):
        """*parse transient rows from the TNS result page content*

        **Key Arguments:**
            - ``content`` -- the content from the TNS results page.
            - ``count`` -- return only the number of rows

        **Return:**
            - ``matchedSources`` -- an iterator of regex match objects, one
              per transient row (or an int row-count if ``count`` is True)
        """
        self.log.info('starting the ``_parse_transient_rows`` method')
        # EVERY TRANSIENT ROW CONTAINS A LINK TO ITS TNS OBJECT PAGE; THE
        # LOOKAHEAD ENDS EACH MATCH AT THE NEXT ROW OR AT THE END-OF-CONTENT
        # MARKER COMMENT
        regexForRow = r"""\n([^\n]*?<a href="/object/.*?)(?=\n[^\n]*?<a href="/object/|<\!\-\- /\.section, /#content \-\->)"""
        if count:
            # A SINGLE SOURCE BLOCK
            matchedSources = re.findall(
                regexForRow,
                content,
                flags=re.S  # re.S
            )
            return len(matchedSources)
        # A SINGLE SOURCE BLOCK
        matchedSources = re.finditer(
            regexForRow,
            content,
            flags=re.S  # re.S
        )
        self.log.info('completed the ``_parse_transient_rows`` method')
        return matchedSources
|
*parse transient rows from the TNS result page content*
**Key Arguments:**
- ``content`` -- the content from the TNS results page.
- ``count`` -- return only the number of rows
**Return:**
- ``transientRows``
|
train
|
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L845-L879
| null |
class search():
"""
*The worker class for the transient namer search module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``ra`` -- RA of the location being checked
- ``dec`` -- DEC of the location being searched
- ``radiusArcsec`` - the radius of the conesearch to perform against the TNS
- ``name`` -- name of the object to search the TNS for
- ``discInLastDays`` -- search the TNS for transient discovered in the last X days
- ``comments`` -- print the comments from the TNS, note these can be long making table outputs somewhat unreadable. Default *False*
**Usage:**
To initiate a search object to search the TNS via an object name (either TNS or survey names accepted):
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
name="Gaia16bbi"
)
or for a conesearch use something similar to:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
ra="06:50:36.74",
dec="+31:06:44.7",
radiusArcsec=5
)
Note the search method can accept coordinates in sexagesimal or decimal defree formats.
To list all new objects discovered in the last three weeks, then use:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
discInLastDays=21
)
"""
# Initialisation
    def __init__(
            self,
            log,
            ra="",
            dec="",
            radiusArcsec="",
            name="",
            discInLastDays="",
            settings=False,
            comments=False
    ):
        # STORE THE SEARCH PARAMETERS ON THE OBJECT
        self.log = log
        log.debug("instansiating a new 'search' object")
        self.settings = settings
        self.ra = ra
        self.dec = dec
        self.radiusArcsec = radiusArcsec
        self.comments = comments
        self.name = name
        self.internal_name = ""
        self.discInLastDays = discInLastDays
        # `page` and `batchSize` drive TNS result pagination in `_query_tns`
        self.page = 0
        self.batchSize = 1000
        # CREATE THE TIME-RANGE WINDOW TO SEARCH TNS
        # (the window end is tomorrow so today's discoveries are included)
        if not discInLastDays:
            self.start = ""
            self.end = ""
        else:
            discInLastDays = int(discInLastDays)
            td = timedelta(days=1)
            end = datetime.now() + td
            self.end = end.strftime("%Y-%m-%d")
            td = timedelta(days=discInLastDays)
            start = datetime.now() - td
            self.start = start.strftime("%Y-%m-%d")
        # DETERMINE IF WE HAVE A TNS OR INTERAL SURVEY NAME
        # TNS names look like 'SN 2016asf' / 'AT2016asf' / '2016asf'; anything
        # that does not match is treated as a survey-internal name instead
        if self.name:
            matchObject = re.match(r'^((SN|AT) ?)?(\d{4}\w{1,6})', self.name)
            if matchObject:
                self.name = matchObject.group(3)
            else:
                self.internal_name = self.name
                self.name = ""
        # DO THE SEARCH OF THE TNS AND COMPILE THE RESULTS INTO SEPARATE RESULT
        # SETS
        # NOTE(review): this hits the TNS over the network at construction time
        self.sourceResultsList, self.photResultsList, self.specResultsList, self.relatedFilesResultsList = self._query_tns()
        # WRAP EACH RESULT SET SO IT CAN BE RENDERED AS csv/json/yaml/md/mysql
        self.sourceResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.sourceResultsList
        )
        self.photResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.photResultsList
        )
        self.specResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.specResultsList
        )
        self.relatedFilesResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.relatedFilesResultsList
        )
        return None
@property
def sources(
self):
"""*The results of the search returned as a python list of dictionaries*
**Usage:**
.. code-block:: python
sources = tns.sources
"""
sourceResultsList = []
sourceResultsList[:] = [dict(l) for l in self.sourceResultsList]
return sourceResultsList
@property
def spectra(
self):
"""*The associated source spectral data*
**Usage:**
.. code-block:: python
sourceSpectra = tns.spectra
"""
specResultsList = []
specResultsList[:] = [dict(l) for l in self.specResultsList]
return specResultsList
@property
def files(
self):
"""*The associated source files*
**Usage:**
.. code-block:: python
sourceFiles = tns.files
"""
relatedFilesResultsList = []
relatedFilesResultsList[:] = [dict(l)
for l in self.relatedFilesResultsList]
return relatedFilesResultsList
@property
def photometry(
self):
"""*The associated source photometry*
**Usage:**
.. code-block:: python
sourcePhotometry = tns.photometry
"""
photResultsList = []
photResultsList[:] = [dict(l) for l in self.photResultsList]
return photResultsList
    @property
    def url(
            self):
        """*The generated URL used for searching of the TNS*

        **Usage:**
            .. code-block:: python

                searchURL = tns.url
        """
        # `_searchURL` is set as a side effect of `_query_tns` (the URL of the
        # last pagination request sent to the TNS search endpoint)
        return self._searchURL
def csv(
self,
dirPath=None):
"""*Render the results in csv format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `csvSources` -- the top-level transient data
- `csvPhot` -- all photometry associated with the transients
- `csvSpec` -- all spectral data associated with the transients
- `csvFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in csv format:
.. code-block:: python
csvSources, csvPhot, csvSpec, csvFiles = tns.csv()
print csvSources
.. code-block:: text
TNSId,TNSName,discoveryName,discSurvey,raSex,decSex,raDeg,decDeg,transRedshift,specType,discMag,discMagFilter,discDate,objectUrl,hostName,hostRedshift,separationArcsec,separationNorthArcsec,separationEastArcsec
2016asf,SN2016asf,ASASSN-16cs,ASAS-SN,06:50:36.73,+31:06:45.36,102.6530,31.1126,0.021,SN Ia,17.1,V-Johnson,2016-03-06 08:09:36,http://wis-tns.weizmann.ac.il/object/2016asf,KUG 0647+311,,0.66,0.65,-0.13
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.csv("~/tns")
.. image:: https://i.imgur.com/BwwqMBg.png
:width: 800px
:alt: csv output
"""
if dirPath:
p = self._file_prefix()
csvSources = self.sourceResults.csv(
filepath=dirPath + "/" + p + "sources.csv")
csvPhot = self.photResults.csv(
filepath=dirPath + "/" + p + "phot.csv")
csvSpec = self.specResults.csv(
filepath=dirPath + "/" + p + "spec.csv")
csvFiles = self.relatedFilesResults.csv(
filepath=dirPath + "/" + p + "relatedFiles.csv")
else:
csvSources = self.sourceResults.csv()
csvPhot = self.photResults.csv()
csvSpec = self.specResults.csv()
csvFiles = self.relatedFilesResults.csv()
return csvSources, csvPhot, csvSpec, csvFiles
def json(
self,
dirPath=None):
"""*Render the results in json format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `jsonSources` -- the top-level transient data
- `jsonPhot` -- all photometry associated with the transients
- `jsonSpec` -- all spectral data associated with the transients
- `jsonFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in json format:
.. code-block:: python
jsonSources, jsonPhot, jsonSpec, jsonFiles = tns.json()
print jsonSources
.. code-block:: text
[
{
"TNSId": "2016asf",
"TNSName": "SN2016asf",
"decDeg": 31.1126,
"decSex": "+31:06:45.36",
"discDate": "2016-03-06 08:09:36",
"discMag": "17.1",
"discMagFilter": "V-Johnson",
"discSurvey": "ASAS-SN",
"discoveryName": "ASASSN-16cs",
"hostName": "KUG 0647+311",
"hostRedshift": null,
"objectUrl": "http://wis-tns.weizmann.ac.il/object/2016asf",
"raDeg": 102.65304166666667,
"raSex": "06:50:36.73",
"separationArcsec": "0.66",
"separationEastArcsec": "-0.13",
"separationNorthArcsec": "0.65",
"specType": "SN Ia",
"transRedshift": "0.021"
}
]
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.json("~/tns")
.. image:: https://i.imgur.com/wAHqARI.png
:width: 800px
:alt: json output
"""
if dirPath:
p = self._file_prefix()
jsonSources = self.sourceResults.json(
filepath=dirPath + "/" + p + "sources.json")
jsonPhot = self.photResults.json(
filepath=dirPath + "/" + p + "phot.json")
jsonSpec = self.specResults.json(
filepath=dirPath + "/" + p + "spec.json")
jsonFiles = self.relatedFilesResults.json(
filepath=dirPath + "/" + p + "relatedFiles.json")
else:
jsonSources = self.sourceResults.json()
jsonPhot = self.photResults.json()
jsonSpec = self.specResults.json()
jsonFiles = self.relatedFilesResults.json()
return jsonSources, jsonPhot, jsonSpec, jsonFiles
def yaml(
self,
dirPath=None):
"""*Render the results in yaml format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `yamlSources` -- the top-level transient data
- `yamlPhot` -- all photometry associated with the transients
- `yamlSpec` -- all spectral data associated with the transients
- `yamlFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in yaml format:
.. code-block:: python
yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml()
print yamlSources
.. code-block:: text
- TNSId: 2016asf
TNSName: SN2016asf
decDeg: 31.1126
decSex: '+31:06:45.36'
discDate: '2016-03-06 08:09:36'
discMag: '17.1'
discMagFilter: V-Johnson
discSurvey: ASAS-SN
discoveryName: ASASSN-16cs
hostName: KUG 0647+311
hostRedshift: null
objectUrl: http://wis-tns.weizmann.ac.il/object/2016asf
raDeg: 102.65304166666667
raSex: '06:50:36.73'
separationArcsec: '0.66'
separationEastArcsec: '-0.13'
separationNorthArcsec: '0.65'
specType: SN Ia
transRedshift: '0.021'
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.yaml("~/tns")
.. image:: https://i.imgur.com/ZpJIC6p.png
:width: 800px
:alt: yaml output
"""
if dirPath:
p = self._file_prefix()
yamlSources = self.sourceResults.yaml(
filepath=dirPath + "/" + p + "sources.yaml")
yamlPhot = self.photResults.yaml(
filepath=dirPath + "/" + p + "phot.yaml")
yamlSpec = self.specResults.yaml(
filepath=dirPath + "/" + p + "spec.yaml")
yamlFiles = self.relatedFilesResults.yaml(
filepath=dirPath + "/" + p + "relatedFiles.yaml")
else:
yamlSources = self.sourceResults.yaml()
yamlPhot = self.photResults.yaml()
yamlSpec = self.specResults.yaml()
yamlFiles = self.relatedFilesResults.yaml()
return yamlSources, yamlPhot, yamlSpec, yamlFiles
def markdown(
self,
dirPath=None):
"""*Render the results in markdown format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `markdownSources` -- the top-level transient data
- `markdownPhot` -- all photometry associated with the transients
- `markdownSpec` -- all spectral data associated with the transients
- `markdownFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in markdown table format:
.. code-block:: python
markdownSources, markdownPhot, markdownSpec, markdownFiles = tns.markdown()
print markdownSources
.. code-block:: text
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
|:---------|:-----------|:---------------|:------------|:-------------|:--------------|:----------|:---------|:---------------|:----------|:---------|:---------------|:---------------------|:----------------------------------------------|:--------------|:--------------|:------------------|:-----------------------|:----------------------|
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.markdown("~/tns")
.. image:: https://i.imgur.com/AYLBQoJ.png
:width: 800px
:alt: markdown output
"""
if dirPath:
p = self._file_prefix()
markdownSources = self.sourceResults.markdown(
filepath=dirPath + "/" + p + "sources.md")
markdownPhot = self.photResults.markdown(
filepath=dirPath + "/" + p + "phot.md")
markdownSpec = self.specResults.markdown(
filepath=dirPath + "/" + p + "spec.md")
markdownFiles = self.relatedFilesResults.markdown(
filepath=dirPath + "/" + p + "relatedFiles.md")
else:
markdownSources = self.sourceResults.markdown()
markdownPhot = self.photResults.markdown()
markdownSpec = self.specResults.markdown()
markdownFiles = self.relatedFilesResults.markdown()
return markdownSources, markdownPhot, markdownSpec, markdownFiles
def table(
self,
dirPath=None):
"""*Render the results as an ascii table*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `tableSources` -- the top-level transient data
- `tablePhot` -- all photometry associated with the transients
- `tableSpec` -- all spectral data associated with the transients
- `tableFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in ascii table format:
.. code-block:: python
tableSources, tablePhot, tableSpec, tableFiles = tns.table()
print tableSources
.. code-block:: text
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.table("~/tns")
.. image:: https://i.imgur.com/m09M0ho.png
:width: 800px
:alt: ascii files
"""
if dirPath:
p = self._file_prefix()
tableSources = self.sourceResults.table(
filepath=dirPath + "/" + p + "sources.ascii")
tablePhot = self.photResults.table(
filepath=dirPath + "/" + p + "phot.ascii")
tableSpec = self.specResults.table(
filepath=dirPath + "/" + p + "spec.ascii")
tableFiles = self.relatedFilesResults.table(
filepath=dirPath + "/" + p + "relatedFiles.ascii")
else:
tableSources = self.sourceResults.table()
tablePhot = self.photResults.table()
tableSpec = self.specResults.table()
tableFiles = self.relatedFilesResults.table()
return tableSources, tablePhot, tableSpec, tableFiles
    def mysql(
            self,
            tableNamePrefix="TNS",
            dirPath=None):
        """*Render the results as MySQL Insert statements*

        **Key Arguments:**
            - ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
            - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

        **Return:**
            - `mysqlSources` -- INSERT statements for the top-level transient data
            - `mysqlPhot` -- INSERT statements for all photometry associated with the transients
            - `mysqlSpec` -- INSERT statements for all spectral data associated with the transients
            - `mysqlFiles` -- INSERT statements for all files associated with the matched transients found on the tns

        **Usage:**
            .. code-block:: python

                mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")

            Pass a directory path to also write the rendered SQL to file;
            the matching CREATE TABLE statements are only included when
            writing to file.
        """
        if dirPath:
            p = self._file_prefix()
            # CREATE TABLE statements accompany the INSERTs only in the
            # file-output branch; each table is keyed so repeat ingests
            # update rather than duplicate rows (ON DUPLICATE KEY)
            createStatement = """
                CREATE TABLE `%(tableNamePrefix)s_sources` (
                    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
                    `TNSId` varchar(20) NOT NULL,
                    `TNSName` varchar(20) DEFAULT NULL,
                    `dateCreated` datetime DEFAULT NULL,
                    `decDeg` double DEFAULT NULL,
                    `decSex` varchar(45) DEFAULT NULL,
                    `discDate` datetime DEFAULT NULL,
                    `discMag` double DEFAULT NULL,
                    `discMagFilter` varchar(45) DEFAULT NULL,
                    `discSurvey` varchar(100) DEFAULT NULL,
                    `discoveryName` varchar(100) DEFAULT NULL,
                    `objectUrl` varchar(200) DEFAULT NULL,
                    `raDeg` double DEFAULT NULL,
                    `raSex` varchar(45) DEFAULT NULL,
                    `specType` varchar(100) DEFAULT NULL,
                    `transRedshift` double DEFAULT NULL,
                    `updated` tinyint(4) DEFAULT '0',
                    `dateLastModified` datetime DEFAULT NULL,
                    `hostName` VARCHAR(100) NULL DEFAULT NULL,
                    `hostRedshift` DOUBLE NULL DEFAULT NULL,
                    `survey` VARCHAR(100) NULL DEFAULT NULL,
                    PRIMARY KEY (`primaryId`),
                    UNIQUE KEY `tnsid` (`TNSId`)
                ) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
            """ % locals()
            mysqlSources = self.sourceResults.mysql(
                tableNamePrefix + "_sources", filepath=dirPath + "/" + p + "sources.sql", createStatement=createStatement)
            createStatement = """
                CREATE TABLE `%(tableNamePrefix)s_photometry` (
                    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
                    `TNSId` varchar(20) NOT NULL,
                    `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
                    `exptime` double DEFAULT NULL,
                    `filter` varchar(100) DEFAULT NULL,
                    `limitingMag` tinyint(4) DEFAULT NULL,
                    `mag` double DEFAULT NULL,
                    `magErr` double DEFAULT NULL,
                    `magUnit` varchar(100) DEFAULT NULL,
                    `objectName` varchar(100) DEFAULT NULL,
                    `obsdate` datetime DEFAULT NULL,
                    `reportAddedDate` datetime DEFAULT NULL,
                    `suggestedType` varchar(100) DEFAULT NULL,
                    `survey` varchar(100) DEFAULT NULL,
                    `telescope` varchar(100) DEFAULT NULL,
                    `updated` tinyint(4) DEFAULT '0',
                    `dateLastModified` datetime DEFAULT NULL,
                    `remarks` VARCHAR(800) NULL DEFAULT NULL,
                    `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
                    PRIMARY KEY (`primaryId`),
                    UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
                    UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),
                    UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)
                ) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
            """ % locals()
            mysqlPhot = self.photResults.mysql(
                tableNamePrefix + "_photometry", filepath=dirPath + "/" + p + "phot.sql", createStatement=createStatement)
            createStatement = """
                CREATE TABLE `%(tableNamePrefix)s_spectra` (
                    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
                    `TNSId` varchar(45) NOT NULL,
                    `TNSuser` varchar(45) DEFAULT NULL,
                    `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
                    `exptime` double DEFAULT NULL,
                    `obsdate` datetime DEFAULT NULL,
                    `reportAddedDate` datetime DEFAULT NULL,
                    `specType` varchar(100) DEFAULT NULL,
                    `survey` varchar(100) DEFAULT NULL,
                    `telescope` varchar(100) DEFAULT NULL,
                    `transRedshift` double DEFAULT NULL,
                    `updated` tinyint(4) DEFAULT '0',
                    `dateLastModified` datetime DEFAULT NULL,
                    `remarks` VARCHAR(800) NULL DEFAULT NULL,
                    `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
                    PRIMARY KEY (`primaryId`),
                    UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
                    UNIQUE KEY `u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)
                ) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
            """ % locals()
            mysqlSpec = self.specResults.mysql(
                tableNamePrefix + "_spectra", filepath=dirPath + "/" + p + "spec.sql", createStatement=createStatement)
            createStatement = """
                CREATE TABLE `%(tableNamePrefix)s_files` (
                    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
                    `TNSId` varchar(100) NOT NULL,
                    `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
                    `dateObs` datetime DEFAULT NULL,
                    `filename` varchar(200) DEFAULT NULL,
                    `spec1phot2` tinyint(4) DEFAULT NULL,
                    `url` varchar(800) DEFAULT NULL,
                    `updated` tinyint(4) DEFAULT '0',
                    `dateLastModified` datetime DEFAULT NULL,
                    `comment` VARCHAR(800) NULL DEFAULT NULL,
                    PRIMARY KEY (`primaryId`),
                    UNIQUE KEY `tnsid_url` (`TNSId`,`url`)
                ) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
            """ % locals()
            mysqlFiles = self.relatedFilesResults.mysql(
                tableNamePrefix + "_files", filepath=dirPath + "/" + p + "relatedFiles.sql", createStatement=createStatement)
        else:
            # no file output requested -- render the INSERT statements only
            mysqlSources = self.sourceResults.mysql(
                tableNamePrefix + "_sources")
            mysqlPhot = self.photResults.mysql(tableNamePrefix + "_photometry")
            mysqlSpec = self.specResults.mysql(tableNamePrefix + "_spectra")
            mysqlFiles = self.relatedFilesResults.mysql(
                tableNamePrefix + "_files")
        return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles
def _query_tns(self):
"""
*determine how to query the TNS, send query and parse the results*
**Return:**
- ``results`` -- a list of dictionaries (one dictionary for each result set returned from the TNS)
"""
self.log.info('starting the ``get`` method')
sourceTable = []
photoTable = []
specTable = []
relatedFilesTable = []
# THIS stop IS TO KEEP TRACK OF THE TNS PAGINATION IF MANY RESULT PAGES
# ARE RETURNED
stop = False
sourceCount = 0
while not stop:
status_code, content, self._searchURL = self._get_tns_search_results()
if status_code != 200:
self.log.error(
'cound not get the search reuslts from the TNS, HTML error code %(status_code)s ' % locals())
return None
if "No results found" in content:
print "No results found"
return sourceTable, photoTable, specTable, relatedFilesTable
if self._parse_transient_rows(content, True) < self.batchSize:
stop = True
else:
self.page += 1
thisPage = self.page
print "Downloaded %(thisPage)s page(s) from the TNS. %(sourceCount)s transients parsed so far." % locals()
sourceCount += self.batchSize
print "\t" + self._searchURL
timesleep.sleep(1)
# PARSE ALL ROWS RETURNED
for transientRow in self._parse_transient_rows(content):
# TOP LEVEL DISCOVERY CONTENT
sourceContent = transientRow.group()
discInfo, TNSId = self._parse_discovery_information(
sourceContent)
sourceTable.append(discInfo)
# PHOTOMETERY
phot, relatedFiles = self._parse_photometry_data(
sourceContent, TNSId)
photoTable += phot
relatedFilesTable += relatedFiles
# SPECTRA
spec, relatedFiles = self._parse_spectral_data(
sourceContent, TNSId)
specTable += spec
relatedFilesTable += relatedFiles
# SORT BY SEPARATION FROM THE SEARCH COORDINATES
try:
sourceTable = sorted(sourceTable, key=itemgetter(
'separationArcsec'), reverse=False)
except:
pass
self.log.info('completed the ``get`` method')
return sourceTable, photoTable, specTable, relatedFilesTable
def _get_tns_search_results(
self):
"""
*query the tns and result the response*
"""
self.log.info('starting the ``_get_tns_search_results`` method')
try:
response = requests.get(
url="http://wis-tns.weizmann.ac.il/search",
params={
"page": self.page,
"ra": self.ra,
"decl": self.dec,
"radius": self.radiusArcsec,
"name": self.name,
"internal_name": self.internal_name,
"date_start[date]": self.start,
"date_end[date]": self.end,
"num_page": self.batchSize,
"display[redshift]": "1",
"display[hostname]": "1",
"display[host_redshift]": "1",
"display[source_group_name]": "1",
"display[internal_name]": "1",
"display[spectra_count]": "1",
"display[discoverymag]": "1",
"display[discmagfilter]": "1",
"display[discoverydate]": "1",
"display[discoverer]": "1",
"display[sources]": "1",
"display[bibcode]": "1",
},
)
except requests.exceptions.RequestException:
print('HTTP Request failed')
self.log.info('completed the ``_get_tns_search_results`` method')
return response.status_code, response.content, response.url
def _file_prefix(
self):
"""*Generate a file prefix based on the type of search for saving files to disk*
**Return:**
- ``prefix`` -- the file prefix
"""
self.log.info('starting the ``_file_prefix`` method')
if self.ra:
now = datetime.now()
prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_conesearch_")
elif self.name:
prefix = self.name + "_tns_conesearch_"
elif self.internal_name:
prefix = self.internal_name + "_tns_conesearch_"
elif self.discInLastDays:
discInLastDays = str(self.discInLastDays)
now = datetime.now()
prefix = now.strftime(
discInLastDays + "d_since_%Y%m%d_tns_conesearch_")
self.log.info('completed the ``_file_prefix`` method')
return prefix
def _parse_discovery_information(
        self,
        content):
    """* parse discovery information from one row on the TNS results page*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page.

    **Return:**
        - ``discoveryData`` -- dictionary of results (only the first parsed row is returned)
        - ``TNSId`` -- the unique TNS id for the transient
    """
    self.log.info('starting the ``_parse_discovery_information`` method')

    # ASTROCALC UNIT CONVERTER OBJECT
    converter = unit_conversion(
        log=self.log
    )

    # ONE NAMED-GROUP MATCH PER RESULTS-TABLE ROW; every <td class="cell-*">
    # column of the TNS search-results HTML is captured into its own group
    matches = re.finditer(
        r"""<tr class="row-.*?"><td class="cell-id">(?P<tnsId>\d*?)</td><td class="cell-name"><a href="(?P<objectUrl>.*?)">(?P<TNSName>.*?)</a></td><td class="cell-.*?<td class="cell-ra">(?P<raSex>.*?)</td><td class="cell-decl">(?P<decSex>.*?)</td><td class="cell-ot_name">(?P<specType>.*?)</td><td class="cell-redshift">(?P<transRedshift>.*?)</td><td class="cell-hostname">(?P<hostName>.*?)</td><td class="cell-host_redshift">(?P<hostRedshift>.*?)</td><td class="cell-source_group_name">(?P<discSurvey>.*?)</td>.*?<td class="cell-internal_name">(<a.*?>)?(?P<discoveryName>.*?)(</a>)?</td>.*?<td class="cell-discoverymag">(?P<discMag>.*?)</td><td class="cell-disc_filter_name">(?P<discMagFilter>.*?)</td><td class="cell-discoverydate">(?P<discDate>.*?)</td><td class="cell-discoverer">(?P<sender>.*?)</td>.*?</tr>""",
        content,
        flags=0  # re.S
    )

    discoveryData = []
    for match in matches:
        row = match.groupdict()
        # STRIP WHITESPACE AND NORMALISE EMPTY CAPTURES TO None
        for k, v in row.iteritems():
            row[k] = v.strip()
            if len(v) == 0:
                row[k] = None
        # NOTE(review): captures are strings, so this `== 0` comparison can
        # never be True - presumably `== "0"` was intended; confirm
        if row["transRedshift"] == 0:
            row["transRedshift"] = None
        # TNS NAMES STARTING WITH A YEAR (1xxx/2xxx) GET THE "SN" PREFIX
        if row["TNSName"][0] in ["1", "2"]:
            row["TNSName"] = "SN" + row["TNSName"]
        # THE HTML CONTAINS A RELATIVE URL - MAKE IT ABSOLUTE
        row["objectUrl"] = "http://wis-tns.weizmann.ac.il" + \
            row["objectUrl"]

        # CONVERT COORDINATES TO DECIMAL DEGREES
        row["raDeg"] = converter.ra_sexegesimal_to_decimal(
            ra=row["raSex"]
        )
        row["decDeg"] = converter.dec_sexegesimal_to_decimal(
            dec=row["decSex"]
        )

        # IF THIS IS A COORDINATE SEARCH, ADD SEPARATION FROM
        # ORIGINAL QUERY COORDINATES
        if self.ra:
            # CALCULATE SEPARATION IN ARCSEC
            from astrocalc.coords import separations
            calculator = separations(
                log=self.log,
                ra1=self.ra,
                dec1=self.dec,
                ra2=row["raDeg"],
                dec2=row["decDeg"],
            )
            angularSeparation, north, east = calculator.get()
            row["separationArcsec"] = angularSeparation
            row["separationNorthArcsec"] = north
            row["separationEastArcsec"] = east

        # FALL BACK TO THE SENDER AS THE SURVEY NAME IF NONE WAS GIVEN
        if not row["discSurvey"]:
            row["survey"] = row["sender"]
        del row["sender"]
        del row["tnsId"]

        # DERIVE THE BARE TNS ID FROM THE (PREFIXED) TNS NAME
        row["TNSName"] = row["TNSName"].replace(" ", "")
        row["TNSId"] = row["TNSName"].replace(
            "SN", "").replace("AT", "")
        TNSId = row["TNSId"]

        # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
        orow = collections.OrderedDict()
        keyOrder = ["TNSId", "TNSName", "discoveryName", "discSurvey", "raSex", "decSex", "raDeg", "decDeg",
                    "transRedshift", "specType", "discMag", "discMagFilter", "discDate", "objectUrl", "hostName", "hostRedshift", "separationArcsec", "separationNorthArcsec", "separationEastArcsec"]
        for k, v in row.iteritems():
            if k not in keyOrder:
                keyOrder.append(k)
        for k in keyOrder:
            try:
                orow[k] = row[k]
            except:
                # SOME KEYS (e.g. separations) ONLY EXIST FOR CONESEARCHES
                self.log.info(
                    "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                pass
        # NOTE(review): the ordered copy `orow` is built but the unordered
        # `row` is appended here - presumably `orow` was intended; confirm
        discoveryData.append(row)

    self.log.info('completed the ``_parse_discovery_information`` method')
    # ONLY THE FIRST MATCHED ROW IS RETURNED TO THE CALLER
    return discoveryData[0], TNSId
def _parse_photometry_data(
        self,
        content,
        TNSId):
    """*parse photometry data from a row in the tns results content*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page
        - ``TNSId`` -- the tns id of the transient

    **Return:**
        - ``photData`` -- a list of dictionaries of the photometry data
        - ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files
    """
    self.log.info('starting the ``_parse_photometry_data`` method')

    photData = []
    relatedFilesTable = []

    # AT REPORT BLOCK - everything between the "AT reportings" header and
    # the "Classification reportings" header (or the end of the content)
    ATBlock = re.search(
        r"""<tr class=[^\n]*?AT reportings.*?(?=<tr class=[^\n]*?Classification reportings|$)""",
        content,
        flags=re.S  # re.S
    )

    if ATBlock:
        ATBlock = ATBlock.group()
        # ONE MATCH PER INDIVIDUAL AT REPORT WITHIN THE BLOCK
        reports = re.finditer(
            r"""<tr class="row-[^"]*"><td class="cell-id">.*?</table>""",
            ATBlock,
            flags=re.S  # re.S
        )
        # FILES ASSOCIATED WITH THE WHOLE AT BLOCK (linked to rows below)
        relatedFiles = self._parse_related_files(ATBlock)

        for r in reports:
            # REPORT-LEVEL METADATA (sender, survey, discovery values ...)
            header = re.search(
                r"""<tr class="row[^"]*".*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<sender>[^<]*).*?reporter_name">(?P<reporters>[^<]*).*?source_group_name">(?P<surveyGroup>[^<]*).*?ra">(?P<ra>[^<]*).*?decl">(?P<dec>[^<]*).*?discovery_date">(?P<obsDate>[^<]*).*?flux">(?P<mag>[^<]*).*?filter_name">(?P<magFilter>[^<]*).*?related_files">(?P<relatedFiles>[^<]*).*?type_name">(?P<suggestedType>[^<]*).*?hostname">(?P<hostName>[^<]*).*?host_redshift">(?P<hostRedshift>[^<]*).*?internal_name">(?P<objectName>[^<]*).*?groups">(?P<survey>[^<]*).*?remarks">(?P<sourceComment>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )
            try:
                header = header.groupdict()
            except:
                # NOTE(review): Python 2 print statement; dumps the report
                # for debugging when the regex fails, but the code below
                # will still fail on the unparsed `header` - confirm intent
                print r.group()
            header["TNSId"] = TNSId

            # DROP COLUMNS NOT CARRIED THROUGH TO THE PHOTOMETRY RESULTS
            del header["reporters"]
            del header["surveyGroup"]
            del header["hostName"]
            del header["hostRedshift"]
            del header["mag"]
            del header["magFilter"]
            del header["obsDate"]
            del header["ra"]
            del header["dec"]

            # TRIM AND FLATTEN THE FREE-TEXT COMMENT IF REQUESTED
            # (truncated to 750 characters, double quotes made single)
            if not self.comments:
                del header['sourceComment']
            else:
                theseComments = header[
                    "sourceComment"].split("\n")
                header["sourceComment"] = ""
                for c in theseComments:
                    header["sourceComment"] += " " + c.strip()
                header["sourceComment"] = header[
                    "sourceComment"].strip().replace('"', "'")[0:750]

            # ONE MATCH PER PHOTOMETRY ROW WITHIN THE REPORT
            phot = re.finditer(
                r"""<tr class="row\-[^"]*".*?obsdate">(?P<obsdate>[^<]*).*?flux">(?P<mag>[^<]*).*?fluxerr">(?P<magErr>[^<]*).*?limflux">(?P<limitingMag>[^<]*).*?unit_name">(?P<magUnit>[^<]*).*?filter_name">(?P<filter>[^<]*).*?tel_inst">(?P<telescope>[^<]*).*?exptime">(?P<exptime>[^<]*).*?observer">(?P<observer>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )
            filesAppended = False
            for p in phot:
                p = p.groupdict()
                del p["observer"]
                # A ROW WITH A LIMITING FLUX BUT NO MAGNITUDE IS A
                # NON-DETECTION - flag via limitingMag = 1 (else 0)
                if p["limitingMag"] and not p["mag"]:
                    p["mag"] = p["limitingMag"]
                    p["limitingMag"] = 1
                    p["remarks"] = p["remarks"].replace(
                        "[Last non detection]", "")
                else:
                    p["limitingMag"] = 0
                if not self.comments:
                    del p["remarks"]
                # MERGE THE REPORT-LEVEL METADATA INTO THE PHOTOMETRY ROW
                p.update(header)

                # APPEND THE BLOCK-LEVEL RELATED FILES ONCE PER REPORT
                if p["relatedFiles"] and filesAppended == False:
                    filesAppended = True
                    for f in relatedFiles:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = f[
                            "filepath"].split("/")[-1]
                        thisFile["url"] = f["filepath"]
                        if self.comments:
                            thisFile["comment"] = f[
                                "fileComment"].replace("\n", " ").strip().replace('"', "'")[0:750]
                        thisFile["dateObs"] = p["obsdate"]
                        # 2 = PHOTOMETRY-ASSOCIATED FILE (1 = SPECTRUM)
                        thisFile["spec1phot2"] = 2
                        relatedFilesTable.append(thisFile)

                # FALL BACK TO THE SENDER AS THE SURVEY NAME
                if not p["survey"] and not p["objectName"]:
                    p["survey"] = p["sender"]
                del p["relatedFiles"]
                del p["sender"]

                # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                orow = collections.OrderedDict()
                keyOrder = ["TNSId", "survey", "obsdate", "filter", "limitingMag", "mag", "magErr",
                            "magUnit", "suggestedType", "telescope", "exptime", "reportAddedDate"]
                for k, v in p.iteritems():
                    if k not in keyOrder:
                        keyOrder.append(k)
                for k in keyOrder:
                    try:
                        orow[k] = p[k]
                    except:
                        # SOME KEYS ARE OPTIONAL (e.g. comments columns)
                        self.log.info(
                            "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                        pass
                photData.append(orow)

    self.log.info('completed the ``_parse_photometry_data`` method')
    return photData, relatedFilesTable
def _parse_related_files(
self,
content):
"""*parse the contents for related files URLs and comments*
**Key Arguments:**
- ``content`` -- the content to parse.
**Return:**
- ``relatedFiles`` -- a list of dictionaries of transient related files
"""
self.log.info('starting the ``_parse_related_files`` method')
relatedFilesList = re.finditer(
r"""<td class="cell-filename">.*?href="(?P<filepath>[^"]*).*?remarks">(?P<fileComment>[^<]*)""",
content,
flags=0 # re.S
)
relatedFiles = []
for f in relatedFilesList:
f = f.groupdict()
relatedFiles.append(f)
self.log.info('completed the ``_parse_related_files`` method')
return relatedFiles
def _parse_spectral_data(
        self,
        content,
        TNSId):
    """*parse spectra data from a row in the tns results content*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page
        - ``TNSId`` -- the tns id of the transient

    **Return:**
        - ``specData`` -- a list of dictionaries of the spectral data
        - ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files
    """
    self.log.info('starting the ``_parse_spectral_data`` method')

    specData = []
    relatedFilesTable = []

    # CLASSIFICATION BLOCK - everything from the "Classification
    # reportings" header to the end of the content
    classBlock = re.search(
        r"""<tr class=[^\n]*?Classification reportings.*$""",
        content,
        flags=re.S  # re.S
    )

    if classBlock:
        classBlock = classBlock.group()
        # ONE MATCH PER INDIVIDUAL CLASSIFICATION REPORT
        reports = re.finditer(
            r"""<tr class="row-[^"]*"><td class="cell-id">.*?</tbody>\s*</table>\s*</div></td> </tr>\s*</tbody>\s*</table>\s*</div></td> </tr>""",
            classBlock,
            flags=re.S  #
        )
        # FILES ASSOCIATED WITH THE WHOLE CLASSIFICATION BLOCK
        relatedFiles = self._parse_related_files(classBlock)

        for r in reports:
            # REPORT-LEVEL METADATA (classifier, type, redshift ...)
            header = re.search(
                r"""<tr class="row.*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<TNSuser>[^<]*).*?classifier_name">(?P<reporters>[^<]*).*?source_group_name">(?P<survey>[^<]*).*?-type">(?P<specType>[^<]*).*?-redshift">(?P<transRedshift>[^<]*).*?-related_files">(?P<relatedFiles>[^<]*).*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<sourceComment>[^<]*)</td>""",
                r.group(),
                flags=re.S  # re.S
            )
            # SKIP REPORTS THAT DON'T MATCH THE EXPECTED LAYOUT
            if not header:
                continue
            header = header.groupdict()
            header["TNSId"] = TNSId

            # DROP COLUMNS NOT CARRIED THROUGH TO THE SPECTRAL RESULTS
            del header["reporters"]
            del header["surveyGroup"]
            del header["survey"]

            # TRIM AND FLATTEN THE FREE-TEXT COMMENT IF REQUESTED
            # (truncated to 750 characters, double quotes made single)
            if not self.comments:
                del header['sourceComment']
            else:
                theseComments = header[
                    "sourceComment"].split("\n")
                header["sourceComment"] = ""
                for c in theseComments:
                    header["sourceComment"] += " " + c.strip()
                header["sourceComment"] = header[
                    "sourceComment"].strip().replace('"', "'")[0:750]

            # ONE MATCH PER SPECTRUM ROW WITHIN THE REPORT; the optional
            # groups capture links to the ascii and fits spectrum files
            spec = re.finditer(
                r"""<tr class="class-results-.*?-obsdate">(?P<obsdate>[^<]*).*?-tel_inst">(?P<telescope>[^<]*).*?-exptime">(?P<exptime>[^<]*).*?-observer">(?P<sender>[^<]*).*?-reducer">(?P<reducer>[^<]*).*?-source_group_name">(?P<survey>[^<]*).*?-asciifile">(.*?<a href="(?P<filepath>[^"]*)".*?</a>)?.*?-fitsfile">(.*?<a href="(?P<fitsFilepath>[^"]*)".*?</a>)?.*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )
            filesAppended = False
            for s in spec:
                s = s.groupdict()
                del s["sender"]
                del s["surveyGroup"]
                del s["reducer"]
                if not self.comments:
                    del s["remarks"]
                else:
                    s["remarks"] = s["remarks"].replace('"', "'")[0:750]
                # MERGE THE REPORT-LEVEL METADATA INTO THE SPECTRUM ROW
                s.update(header)

                # APPEND THE BLOCK-LEVEL RELATED FILES ONCE PER REPORT
                if s["relatedFiles"] and filesAppended == False:
                    filesAppended = True
                    for f in relatedFiles:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = f[
                            "filepath"].split("/")[-1]
                        thisFile["url"] = f["filepath"]
                        if self.comments:
                            thisFile["comment"] = f[
                                "fileComment"].replace("\n", " ").strip()
                        thisFile["dateObs"] = s["obsdate"]
                        # 1 = SPECTRUM-ASSOCIATED FILE (2 = PHOTOMETRY)
                        thisFile["spec1phot2"] = 1
                        relatedFilesTable.append(thisFile)

                # ALSO RECORD THE ASCII AND FITS SPECTRUM FILES THEMSELVES
                for ffile in [s["filepath"], s["fitsFilepath"]]:
                    if ffile:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = ffile.split(
                            "/")[-1]
                        thisFile["url"] = ffile
                        if self.comments:
                            thisFile["comment"] = ""
                        thisFile["dateObs"] = s["obsdate"]
                        thisFile["spec1phot2"] = 1
                        relatedFilesTable.append(thisFile)
                del s["filepath"]
                del s["fitsFilepath"]
                del s["relatedFiles"]

                # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                orow = collections.OrderedDict()
                keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
                            "telescope", "exptime", "reportAddedDate", "TNSuser"]
                for k, v in s.iteritems():
                    if k not in keyOrder:
                        keyOrder.append(k)
                for k in keyOrder:
                    try:
                        orow[k] = s[k]
                    except:
                        # SOME KEYS ARE OPTIONAL (e.g. comments columns)
                        self.log.info(
                            "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                        pass
                specData.append(orow)

    self.log.info('completed the ``_parse_spectral_data`` method')
    return specData, relatedFilesTable
|
thespacedoctor/transientNamer
|
transientNamer/search.py
|
search._parse_discovery_information
|
python
|
def _parse_discovery_information(
self,
content):
self.log.info('starting the ``_parse_discovery_information`` method')
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
matches = re.finditer(
r"""<tr class="row-.*?"><td class="cell-id">(?P<tnsId>\d*?)</td><td class="cell-name"><a href="(?P<objectUrl>.*?)">(?P<TNSName>.*?)</a></td><td class="cell-.*?<td class="cell-ra">(?P<raSex>.*?)</td><td class="cell-decl">(?P<decSex>.*?)</td><td class="cell-ot_name">(?P<specType>.*?)</td><td class="cell-redshift">(?P<transRedshift>.*?)</td><td class="cell-hostname">(?P<hostName>.*?)</td><td class="cell-host_redshift">(?P<hostRedshift>.*?)</td><td class="cell-source_group_name">(?P<discSurvey>.*?)</td>.*?<td class="cell-internal_name">(<a.*?>)?(?P<discoveryName>.*?)(</a>)?</td>.*?<td class="cell-discoverymag">(?P<discMag>.*?)</td><td class="cell-disc_filter_name">(?P<discMagFilter>.*?)</td><td class="cell-discoverydate">(?P<discDate>.*?)</td><td class="cell-discoverer">(?P<sender>.*?)</td>.*?</tr>""",
content,
flags=0 # re.S
)
discoveryData = []
for match in matches:
row = match.groupdict()
for k, v in row.iteritems():
row[k] = v.strip()
if len(v) == 0:
row[k] = None
if row["transRedshift"] == 0:
row["transRedshift"] = None
if row["TNSName"][0] in ["1", "2"]:
row["TNSName"] = "SN" + row["TNSName"]
row["objectUrl"] = "http://wis-tns.weizmann.ac.il" + \
row["objectUrl"]
# CONVERT COORDINATES TO DECIMAL DEGREES
row["raDeg"] = converter.ra_sexegesimal_to_decimal(
ra=row["raSex"]
)
row["decDeg"] = converter.dec_sexegesimal_to_decimal(
dec=row["decSex"]
)
# IF THIS IS A COORDINATE SEARCH, ADD SEPARATION FROM
# ORIGINAL QUERY COORDINATES
if self.ra:
# CALCULATE SEPARATION IN ARCSEC
from astrocalc.coords import separations
calculator = separations(
log=self.log,
ra1=self.ra,
dec1=self.dec,
ra2=row["raDeg"],
dec2=row["decDeg"],
)
angularSeparation, north, east = calculator.get()
row["separationArcsec"] = angularSeparation
row["separationNorthArcsec"] = north
row["separationEastArcsec"] = east
if not row["discSurvey"]:
row["survey"] = row["sender"]
del row["sender"]
del row["tnsId"]
row["TNSName"] = row["TNSName"].replace(" ", "")
row["TNSId"] = row["TNSName"].replace(
"SN", "").replace("AT", "")
TNSId = row["TNSId"]
# ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
orow = collections.OrderedDict()
keyOrder = ["TNSId", "TNSName", "discoveryName", "discSurvey", "raSex", "decSex", "raDeg", "decDeg",
"transRedshift", "specType", "discMag", "discMagFilter", "discDate", "objectUrl", "hostName", "hostRedshift", "separationArcsec", "separationNorthArcsec", "separationEastArcsec"]
for k, v in row.iteritems():
if k not in keyOrder:
keyOrder.append(k)
for k in keyOrder:
try:
orow[k] = row[k]
except:
self.log.info(
"`%(k)s` not found in the source data for %(TNSId)s" % locals())
pass
discoveryData.append(row)
self.log.info('completed the ``_parse_discovery_information`` method')
return discoveryData[0], TNSId
|
* parse discovery information from one row on the TNS results page*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page.
**Return:**
- ``discoveryData`` -- dictionary of results
- ``TNSId`` -- the unique TNS id for the transient
|
train
|
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L881-L971
| null |
class search():
"""
*The worker class for the transient namer search module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``ra`` -- RA of the location being checked
- ``dec`` -- DEC of the location being searched
- ``radiusArcsec`` - the radius of the conesearch to perform against the TNS
- ``name`` -- name of the object to search the TNS for
- ``discInLastDays`` -- search the TNS for transient discovered in the last X days
- ``comments`` -- print the comments from the TNS, note these can be long making table outputs somewhat unreadable. Default *False*
**Usage:**
To initiate a search object to search the TNS via an object name (either TNS or survey names accepted):
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
name="Gaia16bbi"
)
or for a conesearch use something similar to:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
ra="06:50:36.74",
dec="+31:06:44.7",
radiusArcsec=5
)
Note the search method can accept coordinates in sexagesimal or decimal degree formats.
To list all new objects discovered in the last three weeks, then use:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
discInLastDays=21
)
"""
# Initialisation
def __init__(
        self,
        log,
        ra="",
        dec="",
        radiusArcsec="",
        name="",
        discInLastDays="",
        settings=False,
        comments=False
):
    """*Instantiate the search object and immediately run the TNS query*

    **Key Arguments:**
        - ``log`` -- logger
        - ``ra`` -- RA of the location to search (sexagesimal or decimal degrees)
        - ``dec`` -- DEC of the location to search
        - ``radiusArcsec`` -- conesearch radius in arcsec
        - ``name`` -- TNS name or survey-internal name of the object
        - ``discInLastDays`` -- only return transients discovered in the last N days
        - ``settings`` -- the settings dictionary. Default *False*
        - ``comments`` -- include the (long) TNS comments in the results. Default *False*
    """
    self.log = log
    log.debug("instansiating a new 'search' object")
    self.settings = settings
    self.ra = ra
    self.dec = dec
    self.radiusArcsec = radiusArcsec
    self.comments = comments
    self.name = name
    self.internal_name = ""
    self.discInLastDays = discInLastDays
    # PAGINATION STATE FOR THE TNS QUERY
    self.page = 0
    self.batchSize = 1000

    # CREATE THE TIME-RANGE WINDOW TO SEARCH TNS
    if not discInLastDays:
        self.start = ""
        self.end = ""
    else:
        discInLastDays = int(discInLastDays)
        # END THE WINDOW TOMORROW SO TODAY'S DISCOVERIES ARE INCLUDED
        td = timedelta(days=1)
        end = datetime.now() + td
        self.end = end.strftime("%Y-%m-%d")
        td = timedelta(days=discInLastDays)
        start = datetime.now() - td
        self.start = start.strftime("%Y-%m-%d")

    # DETERMINE IF WE HAVE A TNS OR INTERAL SURVEY NAME
    # a TNS name looks like "SN 2016asf" / "AT2016asf" / "2016asf";
    # anything else is treated as a survey-internal name
    if self.name:
        matchObject = re.match(r'^((SN|AT) ?)?(\d{4}\w{1,6})', self.name)
        if matchObject:
            self.name = matchObject.group(3)
        else:
            self.internal_name = self.name
            self.name = ""

    # DO THE SEARCH OF THE TNS AND COMPILE THE RESULTS INTO SEPARATE RESULT
    # SETS
    self.sourceResultsList, self.photResultsList, self.specResultsList, self.relatedFilesResultsList = self._query_tns()

    # WRAP EACH RESULT SET SO IT CAN BE RENDERED AS
    # csv/json/yaml/markdown/ascii/mysql
    self.sourceResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.sourceResultsList
    )
    self.photResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.photResultsList
    )
    self.specResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.specResultsList
    )
    self.relatedFilesResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.relatedFilesResultsList
    )

    return None
@property
def sources(
        self):
    """*The results of the search returned as a python list of dictionaries*

    **Usage:**

    .. code-block:: python

        sources = tns.sources
    """
    # Return shallow dict copies so callers cannot mutate the cached rows
    return [dict(row) for row in self.sourceResultsList]
@property
def spectra(
        self):
    """*The associated source spectral data*

    **Usage:**

    .. code-block:: python

        sourceSpectra = tns.spectra
    """
    # Return shallow dict copies so callers cannot mutate the cached rows
    return [dict(row) for row in self.specResultsList]
@property
def files(
        self):
    """*The associated source files*

    **Usage:**

    .. code-block:: python

        sourceFiles = tns.files
    """
    # Return shallow dict copies so callers cannot mutate the cached rows
    return [dict(row) for row in self.relatedFilesResultsList]
@property
def photometry(
        self):
    """*The associated source photometry*

    **Usage:**

    .. code-block:: python

        sourcePhotometry = tns.photometry
    """
    # Return shallow dict copies so callers cannot mutate the cached rows
    return [dict(row) for row in self.photResultsList]
@property
def url(
        self):
    """*The generated URL used for searching of the TNS*

    **Usage:**

    .. code-block:: python

        searchURL = tns.url
    """
    searchURL = self._searchURL
    return searchURL
def csv(
        self,
        dirPath=None):
    """*Render the results in csv format*

    **Key Arguments:**
        - ``dirPath`` -- directory to save the rendered results to. Default *None* (render in memory only)

    **Return:**
        - ``csvSources`` -- the top-level transient data
        - ``csvPhot`` -- all photometry associated with the transients
        - ``csvSpec`` -- all spectral data associated with the transients
        - ``csvFiles`` -- all files associated with the matched transients found on the tns
    """
    # RENDER IN MEMORY ONLY IF NO OUTPUT DIRECTORY GIVEN
    if not dirPath:
        return (self.sourceResults.csv(),
                self.photResults.csv(),
                self.specResults.csv(),
                self.relatedFilesResults.csv())

    # OTHERWISE WRITE EACH FLAVOUR OF DATA TO ITS OWN FILE; all four share
    # the same search-specific prefix so they can be associated later
    base = dirPath + "/" + self._file_prefix()
    csvSources = self.sourceResults.csv(filepath=base + "sources.csv")
    csvPhot = self.photResults.csv(filepath=base + "phot.csv")
    csvSpec = self.specResults.csv(filepath=base + "spec.csv")
    csvFiles = self.relatedFilesResults.csv(
        filepath=base + "relatedFiles.csv")
    return csvSources, csvPhot, csvSpec, csvFiles
def json(
        self,
        dirPath=None):
    """*Render the results in json format*

    **Key Arguments:**
        - ``dirPath`` -- directory to save the rendered results to. Default *None* (render in memory only)

    **Return:**
        - ``jsonSources`` -- the top-level transient data
        - ``jsonPhot`` -- all photometry associated with the transients
        - ``jsonSpec`` -- all spectral data associated with the transients
        - ``jsonFiles`` -- all files associated with the matched transients found on the tns
    """
    # RENDER IN MEMORY ONLY IF NO OUTPUT DIRECTORY GIVEN
    if not dirPath:
        return (self.sourceResults.json(),
                self.photResults.json(),
                self.specResults.json(),
                self.relatedFilesResults.json())

    # OTHERWISE WRITE EACH FLAVOUR OF DATA TO ITS OWN FILE; all four share
    # the same search-specific prefix so they can be associated later
    base = dirPath + "/" + self._file_prefix()
    jsonSources = self.sourceResults.json(filepath=base + "sources.json")
    jsonPhot = self.photResults.json(filepath=base + "phot.json")
    jsonSpec = self.specResults.json(filepath=base + "spec.json")
    jsonFiles = self.relatedFilesResults.json(
        filepath=base + "relatedFiles.json")
    return jsonSources, jsonPhot, jsonSpec, jsonFiles
def yaml(
        self,
        dirPath=None):
    """*Render the results in yaml format*

    **Key Arguments:**
        - ``dirPath`` -- directory to save the rendered results to. Default *None* (render in memory only)

    **Return:**
        - ``yamlSources`` -- the top-level transient data
        - ``yamlPhot`` -- all photometry associated with the transients
        - ``yamlSpec`` -- all spectral data associated with the transients
        - ``yamlFiles`` -- all files associated with the matched transients found on the tns
    """
    # RENDER IN MEMORY ONLY IF NO OUTPUT DIRECTORY GIVEN
    if not dirPath:
        return (self.sourceResults.yaml(),
                self.photResults.yaml(),
                self.specResults.yaml(),
                self.relatedFilesResults.yaml())

    # OTHERWISE WRITE EACH FLAVOUR OF DATA TO ITS OWN FILE; all four share
    # the same search-specific prefix so they can be associated later
    base = dirPath + "/" + self._file_prefix()
    yamlSources = self.sourceResults.yaml(filepath=base + "sources.yaml")
    yamlPhot = self.photResults.yaml(filepath=base + "phot.yaml")
    yamlSpec = self.specResults.yaml(filepath=base + "spec.yaml")
    yamlFiles = self.relatedFilesResults.yaml(
        filepath=base + "relatedFiles.yaml")
    return yamlSources, yamlPhot, yamlSpec, yamlFiles
def markdown(
        self,
        dirPath=None):
    """*Render the results in markdown table format*

    **Key Arguments:**
        - ``dirPath`` -- directory to save the rendered results to. Default *None* (render in memory only)

    **Return:**
        - ``markdownSources`` -- the top-level transient data
        - ``markdownPhot`` -- all photometry associated with the transients
        - ``markdownSpec`` -- all spectral data associated with the transients
        - ``markdownFiles`` -- all files associated with the matched transients found on the tns
    """
    # RENDER IN MEMORY ONLY IF NO OUTPUT DIRECTORY GIVEN
    if not dirPath:
        return (self.sourceResults.markdown(),
                self.photResults.markdown(),
                self.specResults.markdown(),
                self.relatedFilesResults.markdown())

    # OTHERWISE WRITE EACH FLAVOUR OF DATA TO ITS OWN FILE; all four share
    # the same search-specific prefix so they can be associated later
    base = dirPath + "/" + self._file_prefix()
    markdownSources = self.sourceResults.markdown(
        filepath=base + "sources.md")
    markdownPhot = self.photResults.markdown(filepath=base + "phot.md")
    markdownSpec = self.specResults.markdown(filepath=base + "spec.md")
    markdownFiles = self.relatedFilesResults.markdown(
        filepath=base + "relatedFiles.md")
    return markdownSources, markdownPhot, markdownSpec, markdownFiles
def table(
        self,
        dirPath=None):
    """*Render the results as an ascii table*

    **Key Arguments:**
        - ``dirPath`` -- directory to save the rendered results to. Default *None* (render in memory only)

    **Return:**
        - ``tableSources`` -- the top-level transient data
        - ``tablePhot`` -- all photometry associated with the transients
        - ``tableSpec`` -- all spectral data associated with the transients
        - ``tableFiles`` -- all files associated with the matched transients found on the tns
    """
    # RENDER IN MEMORY ONLY IF NO OUTPUT DIRECTORY GIVEN
    if not dirPath:
        return (self.sourceResults.table(),
                self.photResults.table(),
                self.specResults.table(),
                self.relatedFilesResults.table())

    # OTHERWISE WRITE EACH FLAVOUR OF DATA TO ITS OWN FILE; all four share
    # the same search-specific prefix so they can be associated later
    base = dirPath + "/" + self._file_prefix()
    tableSources = self.sourceResults.table(
        filepath=base + "sources.ascii")
    tablePhot = self.photResults.table(filepath=base + "phot.ascii")
    tableSpec = self.specResults.table(filepath=base + "spec.ascii")
    tableFiles = self.relatedFilesResults.table(
        filepath=base + "relatedFiles.ascii")
    return tableSources, tablePhot, tableSpec, tableFiles
def mysql(
        self,
        tableNamePrefix="TNS",
        dirPath=None):
    """*Render the results as MySQL Insert statements*

    **Key Arguments:**
        - ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `mysqlSources` -- the top-level transient data
        - `mysqlPhot` -- all photometry associated with the transients
        - `mysqlSpec` -- all spectral data associated with the transients
        - `mysqlFiles` -- all files associated with the matched transients found on the tns

    **Usage:**

    To render the results in mysql insert format:

    .. code-block:: python

        mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")
        print mysqlSources

    .. code-block:: text

        INSERT INTO `TNS_sources` (TNSId,TNSName,dateCreated,decDeg,decSex,discDate,discMag,discMagFilter,discSurvey,discoveryName,hostName,hostRedshift,objectUrl,raDeg,raSex,separationArcsec,separationEastArcsec,separationNorthArcsec,specType,transRedshift) VALUES ("2016asf" ,"SN2016asf" ,"2016-09-20T11:22:13" ,"31.1126" ,"+31:06:45.36" ,"2016-03-06 08:09:36" ,"17.1" ,"V-Johnson" ,"ASAS-SN" ,"ASASSN-16cs" ,"KUG 0647+311" ,null ,"http://wis-tns.weizmann.ac.il/object/2016asf" ,"102.653041667" ,"06:50:36.73" ,"0.66" ,"-0.13" ,"0.65" ,"SN Ia" ,"0.021") ON DUPLICATE KEY UPDATE TNSId="2016asf", TNSName="SN2016asf", dateCreated="2016-09-20T11:22:13", decDeg="31.1126", decSex="+31:06:45.36", discDate="2016-03-06 08:09:36", discMag="17.1", discMagFilter="V-Johnson", discSurvey="ASAS-SN", discoveryName="ASASSN-16cs", hostName="KUG 0647+311", hostRedshift=null, objectUrl="http://wis-tns.weizmann.ac.il/object/2016asf", raDeg="102.653041667", raSex="06:50:36.73", separationArcsec="0.66", separationEastArcsec="-0.13", separationNorthArcsec="0.65", specType="SN Ia", transRedshift="0.021", updated=1, dateLastModified=NOW() ;

    You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.

    .. code-block:: python

        tns.mysql("TNS", "~/tns")
    """
    if dirPath:
        p = self._file_prefix()

        # WHEN WRITING TO DISK, PAIR EACH RESULT SET WITH A `CREATE TABLE`
        # STATEMENT SO THE GENERATED SQL FILES ARE SELF-CONTAINED
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_sources` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(20) NOT NULL,
  `TNSName` varchar(20) DEFAULT NULL,
  `dateCreated` datetime DEFAULT NULL,
  `decDeg` double DEFAULT NULL,
  `decSex` varchar(45) DEFAULT NULL,
  `discDate` datetime DEFAULT NULL,
  `discMag` double DEFAULT NULL,
  `discMagFilter` varchar(45) DEFAULT NULL,
  `discSurvey` varchar(100) DEFAULT NULL,
  `discoveryName` varchar(100) DEFAULT NULL,
  `objectUrl` varchar(200) DEFAULT NULL,
  `raDeg` double DEFAULT NULL,
  `raSex` varchar(45) DEFAULT NULL,
  `specType` varchar(100) DEFAULT NULL,
  `transRedshift` double DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `hostName` VARCHAR(100) NULL DEFAULT NULL,
  `hostRedshift` DOUBLE NULL DEFAULT NULL,
  `survey` VARCHAR(100) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid` (`TNSId`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources", filepath=dirPath + "/" + p + "sources.sql", createStatement=createStatement)

        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_photometry` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(20) NOT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `exptime` double DEFAULT NULL,
  `filter` varchar(100) DEFAULT NULL,
  `limitingMag` tinyint(4) DEFAULT NULL,
  `mag` double DEFAULT NULL,
  `magErr` double DEFAULT NULL,
  `magUnit` varchar(100) DEFAULT NULL,
  `objectName` varchar(100) DEFAULT NULL,
  `obsdate` datetime DEFAULT NULL,
  `reportAddedDate` datetime DEFAULT NULL,
  `suggestedType` varchar(100) DEFAULT NULL,
  `survey` varchar(100) DEFAULT NULL,
  `telescope` varchar(100) DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `remarks` VARCHAR(800) NULL DEFAULT NULL,
  `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
  UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),
  UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlPhot = self.photResults.mysql(
            tableNamePrefix + "_photometry", filepath=dirPath + "/" + p + "phot.sql", createStatement=createStatement)

        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_spectra` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(45) NOT NULL,
  `TNSuser` varchar(45) DEFAULT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `exptime` double DEFAULT NULL,
  `obsdate` datetime DEFAULT NULL,
  `reportAddedDate` datetime DEFAULT NULL,
  `specType` varchar(100) DEFAULT NULL,
  `survey` varchar(100) DEFAULT NULL,
  `telescope` varchar(100) DEFAULT NULL,
  `transRedshift` double DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `remarks` VARCHAR(800) NULL DEFAULT NULL,
  `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
  UNIQUE KEY `u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlSpec = self.specResults.mysql(
            tableNamePrefix + "_spectra", filepath=dirPath + "/" + p + "spec.sql", createStatement=createStatement)

        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_files` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(100) NOT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `dateObs` datetime DEFAULT NULL,
  `filename` varchar(200) DEFAULT NULL,
  `spec1phot2` tinyint(4) DEFAULT NULL,
  `url` varchar(800) DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `comment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid_url` (`TNSId`,`url`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files", filepath=dirPath + "/" + p + "relatedFiles.sql", createStatement=createStatement)
    else:
        # NO DIRECTORY GIVEN - JUST RENDER THE INSERT STATEMENTS IN MEMORY
        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources")
        mysqlPhot = self.photResults.mysql(tableNamePrefix + "_photometry")
        mysqlSpec = self.specResults.mysql(tableNamePrefix + "_spectra")
        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files")

    return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles
def _query_tns(self):
    """
    *determine how to query the TNS, send query and parse the results*

    **Return:**
        - ``results`` -- a list of dictionaries (one dictionary for each result set returned from the TNS)
    """
    self.log.info('starting the ``get`` method')

    sourceTable = []
    photoTable = []
    specTable = []
    relatedFilesTable = []

    # THIS stop IS TO KEEP TRACK OF THE TNS PAGINATION IF MANY RESULT PAGES
    # ARE RETURNED
    stop = False
    sourceCount = 0
    while not stop:
        status_code, content, self._searchURL = self._get_tns_search_results()

        # ABORT ON ANY NON-200 RESPONSE FROM THE TNS
        if status_code != 200:
            self.log.error(
                'cound not get the search reuslts from the TNS, HTML error code %(status_code)s ' % locals())
            return None

        if "No results found" in content:
            print "No results found"
            return sourceTable, photoTable, specTable, relatedFilesTable

        # A PAGE WITH FEWER ROWS THAN THE BATCH SIZE IS THE FINAL PAGE;
        # OTHERWISE REQUEST THE NEXT PAGE ON THE FOLLOWING PASS
        if self._parse_transient_rows(content, True) < self.batchSize:
            stop = True
        else:
            self.page += 1
            thisPage = self.page
            print "Downloaded %(thisPage)s page(s) from the TNS. %(sourceCount)s transients parsed so far." % locals()

        # NOTE(review): sourceCount grows by a full batch even on a partial
        # final page, so the progress message may over-count -- confirm
        sourceCount += self.batchSize

        print "\t" + self._searchURL

        # PAUSE BETWEEN PAGE REQUESTS
        timesleep.sleep(1)

        # PARSE ALL ROWS RETURNED
        for transientRow in self._parse_transient_rows(content):

            # TOP LEVEL DISCOVERY CONTENT
            sourceContent = transientRow.group()
            discInfo, TNSId = self._parse_discovery_information(
                sourceContent)
            sourceTable.append(discInfo)

            # PHOTOMETERY
            phot, relatedFiles = self._parse_photometry_data(
                sourceContent, TNSId)
            photoTable += phot
            relatedFilesTable += relatedFiles

            # SPECTRA
            spec, relatedFiles = self._parse_spectral_data(
                sourceContent, TNSId)
            specTable += spec
            relatedFilesTable += relatedFiles

    # SORT BY SEPARATION FROM THE SEARCH COORDINATES
    try:
        sourceTable = sorted(sourceTable, key=itemgetter(
            'separationArcsec'), reverse=False)
    except:
        # separationArcsec is absent for name/date searches; leave unsorted
        pass

    self.log.info('completed the ``get`` method')
    return sourceTable, photoTable, specTable, relatedFilesTable
def _get_tns_search_results(
        self):
    """
    *query the tns and return the response*

    **Return:**
        - ``status_code`` -- the HTTP status code of the TNS response
        - ``content`` -- the raw content of the TNS response
        - ``url`` -- the full search URL that was queried

    **Raises:**
        - ``requests.exceptions.RequestException`` -- if the HTTP request itself fails
    """
    self.log.info('starting the ``_get_tns_search_results`` method')

    try:
        response = requests.get(
            url="http://wis-tns.weizmann.ac.il/search",
            params={
                "page": self.page,
                "ra": self.ra,
                "decl": self.dec,
                "radius": self.radiusArcsec,
                "name": self.name,
                "internal_name": self.internal_name,
                "date_start[date]": self.start,
                "date_end[date]": self.end,
                "num_page": self.batchSize,
                "display[redshift]": "1",
                "display[hostname]": "1",
                "display[host_redshift]": "1",
                "display[source_group_name]": "1",
                "display[internal_name]": "1",
                "display[spectra_count]": "1",
                "display[discoverymag]": "1",
                "display[discmagfilter]": "1",
                "display[discoverydate]": "1",
                "display[discoverer]": "1",
                "display[sources]": "1",
                "display[bibcode]": "1",
            },
        )
    except requests.exceptions.RequestException:
        # BUGFIX: previously the exception was swallowed and execution fell
        # through to the return statement below, raising a confusing
        # NameError on `response`. Re-raise so callers see the real network
        # failure.
        print('HTTP Request failed')
        raise

    self.log.info('completed the ``_get_tns_search_results`` method')
    return response.status_code, response.content, response.url
def _file_prefix(
        self):
    """*Generate a file prefix based on the type of search for saving files to disk*

    **Return:**
        - ``prefix`` -- the file prefix (timestamp-based for conesearches and
          recent-discovery searches, name-based for name searches)
    """
    self.log.info('starting the ``_file_prefix`` method')

    if self.ra:
        # CONESEARCH - TIMESTAMP THE OUTPUT FILES
        now = datetime.now()
        prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_conesearch_")
    elif self.name:
        prefix = self.name + "_tns_conesearch_"
    elif self.internal_name:
        prefix = self.internal_name + "_tns_conesearch_"
    elif self.discInLastDays:
        discInLastDays = str(self.discInLastDays)
        now = datetime.now()
        prefix = now.strftime(
            discInLastDays + "d_since_%Y%m%d_tns_conesearch_")
    else:
        # BUGFIX: previously no branch matched when no search criteria were
        # set and `prefix` was left unbound, raising UnboundLocalError.
        # Fall back to a plain timestamped prefix.
        now = datetime.now()
        prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_search_")

    self.log.info('completed the ``_file_prefix`` method')
    return prefix
def _parse_transient_rows(
        self,
        content,
        count=False):
    """*Extract the individual transient rows from a TNS results page*

    **Key Arguments:**
        - ``content`` -- the content of the TNS results page.
        - ``count`` -- if True, return only the number of rows found.

    **Return:**
        - the row count when ``count`` is True, otherwise an iterator of
          regex match objects (one per transient row)
    """
    self.log.info('starting the ``_parse_transient_rows`` method')

    # A ROW STARTS AT A LINK TO AN OBJECT PAGE AND ENDS WHERE THE NEXT
    # OBJECT LINK (OR THE PAGE FOOTER COMMENT) BEGINS
    rowPattern = r"""\n([^\n]*?<a href="/object/.*?)(?=\n[^\n]*?<a href="/object/|<\!\-\- /\.section, /#content \-\->)"""

    if count:
        return len(re.findall(rowPattern, content, flags=re.S))

    rowIterator = re.finditer(rowPattern, content, flags=re.S)

    self.log.info('completed the ``_parse_transient_rows`` method')
    return rowIterator
def _parse_photometry_data(
        self,
        content,
        TNSId):
    """*parse photometry data from a row in the tns results content*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page
        - ``TNSId`` -- the tns id of the transient

    **Return:**
        - ``photData`` -- a list of dictionaries of the photometry data
        - ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files
    """
    self.log.info('starting the ``_parse_photometry_data`` method')

    photData = []
    relatedFilesTable = []

    # AT REPORT BLOCK
    ATBlock = re.search(
        r"""<tr class=[^\n]*?AT reportings.*?(?=<tr class=[^\n]*?Classification reportings|$)""",
        content,
        flags=re.S  # re.S
    )

    if ATBlock:
        ATBlock = ATBlock.group()
        # ONE REPORT BLOCK PER AT REPORT SUBMITTED TO THE TNS
        reports = re.finditer(
            r"""<tr class="row-[^"]*"><td class="cell-id">.*?</table>""",
            ATBlock,
            flags=re.S  # re.S
        )
        relatedFiles = self._parse_related_files(ATBlock)

        for r in reports:
            # REPORT-LEVEL METADATA SHARED BY EVERY PHOTOMETRY POINT BELOW
            header = re.search(
                r"""<tr class="row[^"]*".*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<sender>[^<]*).*?reporter_name">(?P<reporters>[^<]*).*?source_group_name">(?P<surveyGroup>[^<]*).*?ra">(?P<ra>[^<]*).*?decl">(?P<dec>[^<]*).*?discovery_date">(?P<obsDate>[^<]*).*?flux">(?P<mag>[^<]*).*?filter_name">(?P<magFilter>[^<]*).*?related_files">(?P<relatedFiles>[^<]*).*?type_name">(?P<suggestedType>[^<]*).*?hostname">(?P<hostName>[^<]*).*?host_redshift">(?P<hostRedshift>[^<]*).*?internal_name">(?P<objectName>[^<]*).*?groups">(?P<survey>[^<]*).*?remarks">(?P<sourceComment>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )
            # NOTE(review): if the regex fails, `header` is None and
            # groupdict() raises; the row is printed but execution then
            # continues with `header` not a dict -- presumably a match is
            # always expected here; confirm
            try:
                header = header.groupdict()
            except:
                print r.group()
            header["TNSId"] = TNSId
            # DROP REPORT-LEVEL FIELDS NOT WANTED IN THE PHOTOMETRY ROWS
            del header["reporters"]
            del header["surveyGroup"]
            del header["hostName"]
            del header["hostRedshift"]
            del header["mag"]
            del header["magFilter"]
            del header["obsDate"]
            del header["ra"]
            del header["dec"]
            if not self.comments:
                del header['sourceComment']
            else:
                # FLATTEN MULTI-LINE COMMENTS TO A SINGLE LINE, SWAP DOUBLE
                # FOR SINGLE QUOTES AND TRUNCATE TO 750 CHARACTERS
                theseComments = header[
                    "sourceComment"].split("\n")
                header["sourceComment"] = ""
                for c in theseComments:
                    header["sourceComment"] += " " + c.strip()
                header["sourceComment"] = header[
                    "sourceComment"].strip().replace('"', "'")[0:750]

            # INDIVIDUAL PHOTOMETRY POINTS WITHIN THIS REPORT
            phot = re.finditer(
                r"""<tr class="row\-[^"]*".*?obsdate">(?P<obsdate>[^<]*).*?flux">(?P<mag>[^<]*).*?fluxerr">(?P<magErr>[^<]*).*?limflux">(?P<limitingMag>[^<]*).*?unit_name">(?P<magUnit>[^<]*).*?filter_name">(?P<filter>[^<]*).*?tel_inst">(?P<telescope>[^<]*).*?exptime">(?P<exptime>[^<]*).*?observer">(?P<observer>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )

            # ONLY ATTACH THE REPORT'S RELATED FILES TO THE FIRST
            # PHOTOMETRY POINT THAT DECLARES THEM
            filesAppended = False
            for p in phot:
                p = p.groupdict()
                del p["observer"]
                # A LIMITING-FLUX VALUE WITH NO MAG MARKS A NON-DETECTION
                if p["limitingMag"] and not p["mag"]:
                    p["mag"] = p["limitingMag"]
                    p["limitingMag"] = 1
                    p["remarks"] = p["remarks"].replace(
                        "[Last non detection]", "")
                else:
                    p["limitingMag"] = 0
                if not self.comments:
                    del p["remarks"]
                p.update(header)

                if p["relatedFiles"] and filesAppended == False:
                    filesAppended = True
                    for f in relatedFiles:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = f[
                            "filepath"].split("/")[-1]
                        thisFile["url"] = f["filepath"]
                        if self.comments:
                            thisFile["comment"] = f[
                                "fileComment"].replace("\n", " ").strip().replace('"', "'")[0:750]
                        thisFile["dateObs"] = p["obsdate"]
                        thisFile["spec1phot2"] = 2
                        relatedFilesTable.append(thisFile)

                # FALL BACK TO THE REPORT SENDER WHEN NO SURVEY OR INTERNAL
                # NAME IS GIVEN
                if not p["survey"] and not p["objectName"]:
                    p["survey"] = p["sender"]
                del p["relatedFiles"]
                del p["sender"]

                # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                orow = collections.OrderedDict()
                keyOrder = ["TNSId", "survey", "obsdate", "filter", "limitingMag", "mag", "magErr",
                            "magUnit", "suggestedType", "telescope", "exptime", "reportAddedDate"]
                for k, v in p.iteritems():
                    if k not in keyOrder:
                        keyOrder.append(k)
                for k in keyOrder:
                    try:
                        orow[k] = p[k]
                    except:
                        self.log.info(
                            "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                        pass
                photData.append(orow)

    self.log.info('completed the ``_parse_photometry_data`` method')
    return photData, relatedFilesTable
def _parse_related_files(
        self,
        content):
    """*Extract related-file URLs and their comments from TNS content*

    **Key Arguments:**
        - ``content`` -- the content to parse.

    **Return:**
        - ``relatedFiles`` -- a list of dictionaries, each with a
          ``filepath`` and ``fileComment`` key
    """
    self.log.info('starting the ``_parse_related_files`` method')

    # EACH RELATED FILE IS RENDERED AS A FILENAME CELL CONTAINING A LINK,
    # FOLLOWED BY A REMARKS CELL
    fileMatches = re.finditer(
        r"""<td class="cell-filename">.*?href="(?P<filepath>[^"]*).*?remarks">(?P<fileComment>[^<]*)""",
        content,
        flags=0  # re.S
    )
    relatedFiles = [m.groupdict() for m in fileMatches]

    self.log.info('completed the ``_parse_related_files`` method')
    return relatedFiles
def _parse_spectral_data(
        self,
        content,
        TNSId):
    """*parse spectra data from a row in the tns results content*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page
        - ``TNSId`` -- the tns id of the transient

    **Return:**
        - ``specData`` -- a list of dictionaries of the spectral data
        - ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files
    """
    self.log.info('starting the ``_parse_spectral_data`` method')

    specData = []
    relatedFilesTable = []

    # CLASSIFICATION BLOCK
    classBlock = re.search(
        r"""<tr class=[^\n]*?Classification reportings.*$""",
        content,
        flags=re.S  # re.S
    )

    if classBlock:
        classBlock = classBlock.group()
        # ONE REPORT BLOCK PER CLASSIFICATION REPORT SUBMITTED TO THE TNS
        reports = re.finditer(
            r"""<tr class="row-[^"]*"><td class="cell-id">.*?</tbody>\s*</table>\s*</div></td> </tr>\s*</tbody>\s*</table>\s*</div></td> </tr>""",
            classBlock,
            flags=re.S  #
        )
        relatedFiles = self._parse_related_files(classBlock)

        for r in reports:
            # REPORT-LEVEL METADATA SHARED BY EVERY SPECTRUM BELOW
            header = re.search(
                r"""<tr class="row.*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<TNSuser>[^<]*).*?classifier_name">(?P<reporters>[^<]*).*?source_group_name">(?P<survey>[^<]*).*?-type">(?P<specType>[^<]*).*?-redshift">(?P<transRedshift>[^<]*).*?-related_files">(?P<relatedFiles>[^<]*).*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<sourceComment>[^<]*)</td>""",
                r.group(),
                flags=re.S  # re.S
            )
            # SKIP REPORTS WHOSE HEADER CANNOT BE PARSED
            if not header:
                continue
            header = header.groupdict()
            header["TNSId"] = TNSId
            # DROP REPORT-LEVEL FIELDS NOT WANTED IN THE SPECTRUM ROWS
            del header["reporters"]
            del header["surveyGroup"]
            del header["survey"]
            if not self.comments:
                del header['sourceComment']
            else:
                # FLATTEN MULTI-LINE COMMENTS TO A SINGLE LINE, SWAP DOUBLE
                # FOR SINGLE QUOTES AND TRUNCATE TO 750 CHARACTERS
                theseComments = header[
                    "sourceComment"].split("\n")
                header["sourceComment"] = ""
                for c in theseComments:
                    header["sourceComment"] += " " + c.strip()
                header["sourceComment"] = header[
                    "sourceComment"].strip().replace('"', "'")[0:750]

            # INDIVIDUAL SPECTRA WITHIN THIS REPORT (ASCII AND FITS FILE
            # LINKS ARE OPTIONAL GROUPS)
            spec = re.finditer(
                r"""<tr class="class-results-.*?-obsdate">(?P<obsdate>[^<]*).*?-tel_inst">(?P<telescope>[^<]*).*?-exptime">(?P<exptime>[^<]*).*?-observer">(?P<sender>[^<]*).*?-reducer">(?P<reducer>[^<]*).*?-source_group_name">(?P<survey>[^<]*).*?-asciifile">(.*?<a href="(?P<filepath>[^"]*)".*?</a>)?.*?-fitsfile">(.*?<a href="(?P<fitsFilepath>[^"]*)".*?</a>)?.*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )

            # ONLY ATTACH THE REPORT'S RELATED FILES TO THE FIRST SPECTRUM
            # THAT DECLARES THEM
            filesAppended = False
            for s in spec:
                s = s.groupdict()
                del s["sender"]
                del s["surveyGroup"]
                del s["reducer"]
                if not self.comments:
                    del s["remarks"]
                else:
                    s["remarks"] = s["remarks"].replace('"', "'")[0:750]
                s.update(header)

                if s["relatedFiles"] and filesAppended == False:
                    filesAppended = True
                    for f in relatedFiles:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = f[
                            "filepath"].split("/")[-1]
                        thisFile["url"] = f["filepath"]
                        if self.comments:
                            thisFile["comment"] = f[
                                "fileComment"].replace("\n", " ").strip()
                        thisFile["dateObs"] = s["obsdate"]
                        thisFile["spec1phot2"] = 1
                        relatedFilesTable.append(thisFile)

                # RECORD THE SPECTRUM'S OWN ASCII/FITS FILES AS RELATED
                # FILES TOO
                for ffile in [s["filepath"], s["fitsFilepath"]]:
                    if ffile:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = ffile.split(
                            "/")[-1]
                        thisFile["url"] = ffile
                        if self.comments:
                            thisFile["comment"] = ""
                        thisFile["dateObs"] = s["obsdate"]
                        thisFile["spec1phot2"] = 1
                        relatedFilesTable.append(thisFile)
                del s["filepath"]
                del s["fitsFilepath"]
                del s["relatedFiles"]

                # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                orow = collections.OrderedDict()
                keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
                            "telescope", "exptime", "reportAddedDate", "TNSuser"]
                for k, v in s.iteritems():
                    if k not in keyOrder:
                        keyOrder.append(k)
                for k in keyOrder:
                    try:
                        orow[k] = s[k]
                    except:
                        self.log.info(
                            "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                        pass
                specData.append(orow)

    self.log.info('completed the ``_parse_spectral_data`` method')
    return specData, relatedFilesTable
|
thespacedoctor/transientNamer
|
transientNamer/search.py
|
search._parse_photometry_data
|
python
|
def _parse_photometry_data(
self,
content,
TNSId):
self.log.info('starting the ``_parse_photometry_data`` method')
photData = []
relatedFilesTable = []
# AT REPORT BLOCK
ATBlock = re.search(
r"""<tr class=[^\n]*?AT reportings.*?(?=<tr class=[^\n]*?Classification reportings|$)""",
content,
flags=re.S # re.S
)
if ATBlock:
ATBlock = ATBlock.group()
reports = re.finditer(
r"""<tr class="row-[^"]*"><td class="cell-id">.*?</table>""",
ATBlock,
flags=re.S # re.S
)
relatedFiles = self._parse_related_files(ATBlock)
for r in reports:
header = re.search(
r"""<tr class="row[^"]*".*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<sender>[^<]*).*?reporter_name">(?P<reporters>[^<]*).*?source_group_name">(?P<surveyGroup>[^<]*).*?ra">(?P<ra>[^<]*).*?decl">(?P<dec>[^<]*).*?discovery_date">(?P<obsDate>[^<]*).*?flux">(?P<mag>[^<]*).*?filter_name">(?P<magFilter>[^<]*).*?related_files">(?P<relatedFiles>[^<]*).*?type_name">(?P<suggestedType>[^<]*).*?hostname">(?P<hostName>[^<]*).*?host_redshift">(?P<hostRedshift>[^<]*).*?internal_name">(?P<objectName>[^<]*).*?groups">(?P<survey>[^<]*).*?remarks">(?P<sourceComment>[^<]*)""",
r.group(),
flags=0 # re.S
)
try:
header = header.groupdict()
except:
print r.group()
header["TNSId"] = TNSId
del header["reporters"]
del header["surveyGroup"]
del header["hostName"]
del header["hostRedshift"]
del header["mag"]
del header["magFilter"]
del header["obsDate"]
del header["ra"]
del header["dec"]
if not self.comments:
del header['sourceComment']
else:
theseComments = header[
"sourceComment"].split("\n")
header["sourceComment"] = ""
for c in theseComments:
header["sourceComment"] += " " + c.strip()
header["sourceComment"] = header[
"sourceComment"].strip().replace('"', "'")[0:750]
phot = re.finditer(
r"""<tr class="row\-[^"]*".*?obsdate">(?P<obsdate>[^<]*).*?flux">(?P<mag>[^<]*).*?fluxerr">(?P<magErr>[^<]*).*?limflux">(?P<limitingMag>[^<]*).*?unit_name">(?P<magUnit>[^<]*).*?filter_name">(?P<filter>[^<]*).*?tel_inst">(?P<telescope>[^<]*).*?exptime">(?P<exptime>[^<]*).*?observer">(?P<observer>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
r.group(),
flags=0 # re.S
)
filesAppended = False
for p in phot:
p = p.groupdict()
del p["observer"]
if p["limitingMag"] and not p["mag"]:
p["mag"] = p["limitingMag"]
p["limitingMag"] = 1
p["remarks"] = p["remarks"].replace(
"[Last non detection]", "")
else:
p["limitingMag"] = 0
if not self.comments:
del p["remarks"]
p.update(header)
if p["relatedFiles"] and filesAppended == False:
filesAppended = True
for f in relatedFiles:
# ORDER THE DICTIONARY FOR THIS ROW OF
# RESULTS
thisFile = collections.OrderedDict()
thisFile["TNSId"] = TNSId
thisFile["filename"] = f[
"filepath"].split("/")[-1]
thisFile["url"] = f["filepath"]
if self.comments:
thisFile["comment"] = f[
"fileComment"].replace("\n", " ").strip().replace('"', "'")[0:750]
thisFile["dateObs"] = p["obsdate"]
thisFile["spec1phot2"] = 2
relatedFilesTable.append(thisFile)
if not p["survey"] and not p["objectName"]:
p["survey"] = p["sender"]
del p["relatedFiles"]
del p["sender"]
# ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
orow = collections.OrderedDict()
keyOrder = ["TNSId", "survey", "obsdate", "filter", "limitingMag", "mag", "magErr",
"magUnit", "suggestedType", "telescope", "exptime", "reportAddedDate"]
for k, v in p.iteritems():
if k not in keyOrder:
keyOrder.append(k)
for k in keyOrder:
try:
orow[k] = p[k]
except:
self.log.info(
"`%(k)s` not found in the source data for %(TNSId)s" % locals())
pass
photData.append(orow)
self.log.info('completed the ``_parse_photometry_data`` method')
return photData, relatedFilesTable
|
*parse photometry data from a row in the tns results content*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page
- ``TNSId`` -- the tns id of the transient
**Return:**
- ``photData`` -- a list of dictionaries of the photometry data
- ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files
|
train
|
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L973-L1106
| null |
class search():
"""
*The worker class for the transient namer search module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``ra`` -- RA of the location being checked
- ``dec`` -- DEC of the location being searched
- ``radiusArcsec`` - the radius of the conesearch to perform against the TNS
- ``name`` -- name of the object to search the TNS for
- ``discInLastDays`` -- search the TNS for transient discovered in the last X days
- ``comments`` -- print the comments from the TNS, note these can be long making table outputs somewhat unreadable. Default *False*
**Usage:**
To initiate a search object to search the TNS via an object name (either TNS or survey names accepted):
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
name="Gaia16bbi"
)
or for a conesearch use something similar to:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
ra="06:50:36.74",
dec="+31:06:44.7",
radiusArcsec=5
)
Note the search method can accept coordinates in sexagesimal or decimal defree formats.
To list all new objects discovered in the last three weeks, then use:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
discInLastDays=21
)
"""
# Initialisation
def __init__(
        self,
        log,
        ra="",
        dec="",
        radiusArcsec="",
        name="",
        discInLastDays="",
        settings=False,
        comments=False
):
    """Set up the search, query the TNS and compile the four result sets.

    See the class docstring for a description of the arguments.
    """
    self.log = log
    log.debug("instansiating a new 'search' object")
    self.settings = settings
    self.ra = ra
    self.dec = dec
    self.radiusArcsec = radiusArcsec
    self.comments = comments
    self.name = name
    self.internal_name = ""
    self.discInLastDays = discInLastDays
    # TNS PAGINATION STATE: START ON PAGE 0, REQUEST UP TO 1000 ROWS A PAGE
    self.page = 0
    self.batchSize = 1000

    # CREATE THE TIME-RANGE WINDOW TO SEARCH TNS
    if not discInLastDays:
        self.start = ""
        self.end = ""
    else:
        # WINDOW RUNS FROM `discInLastDays` AGO UNTIL TOMORROW
        discInLastDays = int(discInLastDays)
        td = timedelta(days=1)
        end = datetime.now() + td
        self.end = end.strftime("%Y-%m-%d")
        td = timedelta(days=discInLastDays)
        start = datetime.now() - td
        self.start = start.strftime("%Y-%m-%d")

    # DETERMINE IF WE HAVE A TNS OR INTERAL SURVEY NAME
    if self.name:
        # TNS NAMES LOOK LIKE 'SN2016asf'/'AT2016asf'/'2016asf'; ANYTHING
        # ELSE IS TREATED AS A SURVEY-INTERNAL NAME
        matchObject = re.match(r'^((SN|AT) ?)?(\d{4}\w{1,6})', self.name)
        if matchObject:
            self.name = matchObject.group(3)
        else:
            self.internal_name = self.name
            self.name = ""

    # DO THE SEARCH OF THE TNS AND COMPILE THE RESULTS INTO SEPARATE RESULT
    # SETS
    self.sourceResultsList, self.photResultsList, self.specResultsList, self.relatedFilesResultsList = self._query_tns()

    self.sourceResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.sourceResultsList
    )
    self.photResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.photResultsList
    )
    self.specResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.specResultsList
    )
    self.relatedFilesResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.relatedFilesResultsList
    )

    return None
@property
def sources(
        self):
    """*The results of the search returned as a python list of dictionaries*

    **Usage:**

    .. code-block:: python

        sources = tns.sources
    """
    # CONVERT EACH RESULT ROW TO A PLAIN DICT BEFORE HANDING IT BACK
    return [dict(row) for row in self.sourceResultsList]
@property
def spectra(
        self):
    """*The associated source spectral data*

    **Usage:**

    .. code-block:: python

        sourceSpectra = tns.spectra
    """
    # CONVERT EACH RESULT ROW TO A PLAIN DICT BEFORE HANDING IT BACK
    return [dict(row) for row in self.specResultsList]
@property
def files(
        self):
    """*The associated source files*

    **Usage:**

    .. code-block:: python

        sourceFiles = tns.files
    """
    # CONVERT EACH RESULT ROW TO A PLAIN DICT BEFORE HANDING IT BACK
    return [dict(row) for row in self.relatedFilesResultsList]
@property
def photometry(
        self):
    """*The associated source photometry*

    **Usage:**

    .. code-block:: python

        sourcePhotometry = tns.photometry
    """
    # CONVERT EACH RESULT ROW TO A PLAIN DICT BEFORE HANDING IT BACK
    return [dict(row) for row in self.photResultsList]
@property
def url(
        self):
    """*The generated URL used for searching of the TNS*

    **Usage:**

    .. code-block:: python

        searchURL = tns.url
    """
    # THE URL IS CACHED BY `_query_tns` WHEN THE SEARCH IS PERFORMED
    return self._searchURL
def csv(
        self,
        dirPath=None):
    """*Render the results in csv format*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None* (nothing written to disk)

    **Return:**
        - `csvSources` -- the top-level transient data
        - `csvPhot` -- all photometry associated with the transients
        - `csvSpec` -- all spectral data associated with the transients
        - `csvFiles` -- all files associated with the matched transients found on the tns

    **Usage:**

    .. code-block:: python

        csvSources, csvPhot, csvSpec, csvFiles = tns.csv()

    Pass a directory path to also write the four result sets to separate
    files; all rows can be associated with their transient source via the
    unique `TNSId` column.
    """
    # THE FOUR RESULT SETS PAIRED WITH THE FILENAME SUFFIX USED WHEN
    # WRITING TO DISK
    resultSets = (
        (self.sourceResults, "sources"),
        (self.photResults, "phot"),
        (self.specResults, "spec"),
        (self.relatedFilesResults, "relatedFiles"),
    )

    if dirPath:
        prefix = self._file_prefix()
        rendered = tuple(r.csv(filepath=dirPath + "/" + prefix + suffix + ".csv")
                         for r, suffix in resultSets)
    else:
        rendered = tuple(r.csv() for r, suffix in resultSets)

    return rendered
def json(
        self,
        dirPath=None):
    """*Render the results in json format*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None* (nothing written to disk)

    **Return:**
        - `jsonSources` -- the top-level transient data
        - `jsonPhot` -- all photometry associated with the transients
        - `jsonSpec` -- all spectral data associated with the transients
        - `jsonFiles` -- all files associated with the matched transients found on the tns

    **Usage:**

    .. code-block:: python

        jsonSources, jsonPhot, jsonSpec, jsonFiles = tns.json()

    Pass a directory path to also write the four result sets to separate
    files; all rows can be associated with their transient source via the
    unique `TNSId` key.
    """
    # THE FOUR RESULT SETS PAIRED WITH THE FILENAME SUFFIX USED WHEN
    # WRITING TO DISK
    resultSets = (
        (self.sourceResults, "sources"),
        (self.photResults, "phot"),
        (self.specResults, "spec"),
        (self.relatedFilesResults, "relatedFiles"),
    )

    if dirPath:
        prefix = self._file_prefix()
        rendered = tuple(r.json(filepath=dirPath + "/" + prefix + suffix + ".json")
                         for r, suffix in resultSets)
    else:
        rendered = tuple(r.json() for r, suffix in resultSets)

    return rendered
def yaml(
        self,
        dirPath=None):
    """*Render the results in yaml format*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None* (render in memory only)

    **Return:**
        - ``yamlSources`` -- the top-level transient data
        - ``yamlPhot`` -- all photometry associated with the transients
        - ``yamlSpec`` -- all spectral data associated with the transients
        - ``yamlFiles`` -- all files associated with the matched transients found on the tns

    **Usage:**

        .. code-block:: python

            yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml()

        Pass a directory path to also write the four result sets to disk;
        rows in the separate files can be cross-matched via the transient's
        unique `TNSId`.
    """
    # THE FOUR FLAVOURS OF RESULT SET, PAIRED WITH THE FILENAME SUFFIX USED
    # WHEN WRITING TO DISK
    resultSets = [
        (self.sourceResults, "sources.yaml"),
        (self.photResults, "phot.yaml"),
        (self.specResults, "spec.yaml"),
        (self.relatedFilesResults, "relatedFiles.yaml")
    ]
    if dirPath:
        p = self._file_prefix()
        rendered = [rs.yaml(filepath=dirPath + "/" + p + suffix)
                    for rs, suffix in resultSets]
    else:
        rendered = [rs.yaml() for rs, suffix in resultSets]
    yamlSources, yamlPhot, yamlSpec, yamlFiles = rendered
    return yamlSources, yamlPhot, yamlSpec, yamlFiles
def markdown(
        self,
        dirPath=None):
    """*Render the results in markdown table format*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None* (render in memory only)

    **Return:**
        - ``markdownSources`` -- the top-level transient data
        - ``markdownPhot`` -- all photometry associated with the transients
        - ``markdownSpec`` -- all spectral data associated with the transients
        - ``markdownFiles`` -- all files associated with the matched transients found on the tns

    **Usage:**

        .. code-block:: python

            markdownSources, markdownPhot, markdownSpec, markdownFiles = tns.markdown()

        Pass a directory path to also write the four result sets to disk;
        rows in the separate files can be cross-matched via the transient's
        unique `TNSId`.
    """
    saveToDisk = True if dirPath else False
    if saveToDisk:
        p = self._file_prefix()
    renders = []
    # RENDER EACH RESULT SET IN TURN, OPTIONALLY WRITING IT TO DISK
    for resultSet, suffix in (
            (self.sourceResults, "sources.md"),
            (self.photResults, "phot.md"),
            (self.specResults, "spec.md"),
            (self.relatedFilesResults, "relatedFiles.md")):
        if saveToDisk:
            renders.append(resultSet.markdown(
                filepath=dirPath + "/" + p + suffix))
        else:
            renders.append(resultSet.markdown())
    markdownSources, markdownPhot, markdownSpec, markdownFiles = renders
    return markdownSources, markdownPhot, markdownSpec, markdownFiles
def table(
        self,
        dirPath=None):
    """*Render the results as ascii tables*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None* (render in memory only)

    **Return:**
        - ``tableSources`` -- the top-level transient data
        - ``tablePhot`` -- all photometry associated with the transients
        - ``tableSpec`` -- all spectral data associated with the transients
        - ``tableFiles`` -- all files associated with the matched transients found on the tns

    **Usage:**

        .. code-block:: python

            tableSources, tablePhot, tableSpec, tableFiles = tns.table()

        Pass a directory path to also write the four result sets to disk;
        rows in the separate files can be cross-matched via the transient's
        unique `TNSId`.
    """
    # BUILD A SMALL RENDER HELPER SO THE TO-DISK / IN-MEMORY DECISION IS
    # MADE ONCE RATHER THAN ONCE PER RESULT SET
    if dirPath:
        p = self._file_prefix()

        def render(resultSet, suffix):
            # RENDER AND PERSIST THIS RESULT SET
            return resultSet.table(filepath=dirPath + "/" + p + suffix)
    else:
        def render(resultSet, suffix):
            # RENDER IN MEMORY ONLY
            return resultSet.table()
    tableSources = render(self.sourceResults, "sources.ascii")
    tablePhot = render(self.photResults, "phot.ascii")
    tableSpec = render(self.specResults, "spec.ascii")
    tableFiles = render(self.relatedFilesResults, "relatedFiles.ascii")
    return tableSources, tablePhot, tableSpec, tableFiles
def mysql(
        self,
        tableNamePrefix="TNS",
        dirPath=None):
    """*Render the results as MySQL Insert statements*

    **Key Arguments:**
        - ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `mysqlSources` -- the top-level transient data
        - `mysqlPhot` -- all photometry associated with the transients
        - `mysqlSpec` -- all spectral data associated with the transients
        - `mysqlFiles` -- all files associated with the matched transients found on the tns

    **Usage:**

        To render the results in mysql insert format:

        .. code-block:: python

            mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")

        You can save the results to file by passing in a directory path
        within which to save the files to. The four flavours of data
        (sources, photometry, spectra and files) are saved to separate files
        but all data can be assoicated with its transient source using the
        transient's unique `TNSId`.

        .. code-block:: python

            tns.mysql("TNS", "~/tns")
    """
    if dirPath:
        p = self._file_prefix()
        # DDL FOR THE SOURCES TABLE -- NOTE THE `%(tableNamePrefix)s`
        # PLACEHOLDERS ARE FILLED VIA `% locals()`, SO THE LOCAL NAME
        # `tableNamePrefix` MUST NOT CHANGE
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_sources` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`TNSId` varchar(20) NOT NULL,
`TNSName` varchar(20) DEFAULT NULL,
`dateCreated` datetime DEFAULT NULL,
`decDeg` double DEFAULT NULL,
`decSex` varchar(45) DEFAULT NULL,
`discDate` datetime DEFAULT NULL,
`discMag` double DEFAULT NULL,
`discMagFilter` varchar(45) DEFAULT NULL,
`discSurvey` varchar(100) DEFAULT NULL,
`discoveryName` varchar(100) DEFAULT NULL,
`objectUrl` varchar(200) DEFAULT NULL,
`raDeg` double DEFAULT NULL,
`raSex` varchar(45) DEFAULT NULL,
`specType` varchar(100) DEFAULT NULL,
`transRedshift` double DEFAULT NULL,
`updated` tinyint(4) DEFAULT '0',
`dateLastModified` datetime DEFAULT NULL,
`hostName` VARCHAR(100) NULL DEFAULT NULL,
`hostRedshift` DOUBLE NULL DEFAULT NULL,
`survey` VARCHAR(100) NULL DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `tnsid` (`TNSId`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources", filepath=dirPath + "/" + p + "sources.sql", createStatement=createStatement)
        # DDL FOR THE PHOTOMETRY TABLE
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_photometry` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`TNSId` varchar(20) NOT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`exptime` double DEFAULT NULL,
`filter` varchar(100) DEFAULT NULL,
`limitingMag` tinyint(4) DEFAULT NULL,
`mag` double DEFAULT NULL,
`magErr` double DEFAULT NULL,
`magUnit` varchar(100) DEFAULT NULL,
`objectName` varchar(100) DEFAULT NULL,
`obsdate` datetime DEFAULT NULL,
`reportAddedDate` datetime DEFAULT NULL,
`suggestedType` varchar(100) DEFAULT NULL,
`survey` varchar(100) DEFAULT NULL,
`telescope` varchar(100) DEFAULT NULL,
`updated` tinyint(4) DEFAULT '0',
`dateLastModified` datetime DEFAULT NULL,
`remarks` VARCHAR(800) NULL DEFAULT NULL,
`sourceComment` VARCHAR(800) NULL DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),
UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlPhot = self.photResults.mysql(
            tableNamePrefix + "_photometry", filepath=dirPath + "/" + p + "phot.sql", createStatement=createStatement)
        # DDL FOR THE SPECTRA TABLE
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_spectra` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`TNSId` varchar(45) NOT NULL,
`TNSuser` varchar(45) DEFAULT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`exptime` double DEFAULT NULL,
`obsdate` datetime DEFAULT NULL,
`reportAddedDate` datetime DEFAULT NULL,
`specType` varchar(100) DEFAULT NULL,
`survey` varchar(100) DEFAULT NULL,
`telescope` varchar(100) DEFAULT NULL,
`transRedshift` double DEFAULT NULL,
`updated` tinyint(4) DEFAULT '0',
`dateLastModified` datetime DEFAULT NULL,
`remarks` VARCHAR(800) NULL DEFAULT NULL,
`sourceComment` VARCHAR(800) NULL DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
UNIQUE KEY `u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlSpec = self.specResults.mysql(
            tableNamePrefix + "_spectra", filepath=dirPath + "/" + p + "spec.sql", createStatement=createStatement)
        # DDL FOR THE RELATED-FILES TABLE
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_files` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`TNSId` varchar(100) NOT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`dateObs` datetime DEFAULT NULL,
`filename` varchar(200) DEFAULT NULL,
`spec1phot2` tinyint(4) DEFAULT NULL,
`url` varchar(800) DEFAULT NULL,
`updated` tinyint(4) DEFAULT '0',
`dateLastModified` datetime DEFAULT NULL,
`comment` VARCHAR(800) NULL DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `tnsid_url` (`TNSId`,`url`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files", filepath=dirPath + "/" + p + "relatedFiles.sql", createStatement=createStatement)
    else:
        # NO DIRECTORY GIVEN -- RENDER THE INSERT STATEMENTS IN MEMORY ONLY
        # (NO CREATE-TABLE DDL IS GENERATED IN THIS BRANCH)
        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources")
        mysqlPhot = self.photResults.mysql(tableNamePrefix + "_photometry")
        mysqlSpec = self.specResults.mysql(tableNamePrefix + "_spectra")
        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files")
    return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles
def _query_tns(self):
    """
    *determine how to query the TNS, send query and parse the results*

    **Return:**
        - ``sourceTable`` -- the top-level transient data (list of dictionaries)
        - ``photoTable`` -- all photometry associated with the transients
        - ``specTable`` -- all spectral data associated with the transients
        - ``relatedFilesTable`` -- all files associated with the matched transients
    """
    self.log.info('starting the ``get`` method')
    sourceTable = []
    photoTable = []
    specTable = []
    relatedFilesTable = []
    # THIS stop IS TO KEEP TRACK OF THE TNS PAGINATION IF MANY RESULT PAGES
    # ARE RETURNED
    stop = False
    sourceCount = 0
    while not stop:
        status_code, content, self._searchURL = self._get_tns_search_results()
        if status_code != 200:
            # FIX: typos corrected in the log message. Also return the
            # (possibly empty) result tables rather than None -- the caller
            # unpacks four values, so returning None raised a TypeError.
            self.log.error(
                'could not get the search results from the TNS, HTML error code %(status_code)s ' % locals())
            return sourceTable, photoTable, specTable, relatedFilesTable
        if "No results found" in content:
            # FIX: print() with a single argument behaves identically under
            # python 2 and 3 (the bare print statement is a py3 SyntaxError)
            print("No results found")
            return sourceTable, photoTable, specTable, relatedFilesTable
        # FEWER ROWS THAN THE BATCH SIZE MEANS THIS IS THE FINAL PAGE
        if self._parse_transient_rows(content, True) < self.batchSize:
            stop = True
        else:
            self.page += 1
            thisPage = self.page
            print("Downloaded %(thisPage)s page(s) from the TNS. %(sourceCount)s transients parsed so far." % locals())
            sourceCount += self.batchSize
            print("\t" + self._searchURL)
            # BE POLITE TO THE TNS SERVER BETWEEN PAGE REQUESTS
            timesleep.sleep(1)
        # PARSE ALL ROWS RETURNED ON THIS PAGE
        for transientRow in self._parse_transient_rows(content):
            # TOP LEVEL DISCOVERY CONTENT
            sourceContent = transientRow.group()
            discInfo, TNSId = self._parse_discovery_information(
                sourceContent)
            sourceTable.append(discInfo)
            # PHOTOMETERY
            phot, relatedFiles = self._parse_photometry_data(
                sourceContent, TNSId)
            photoTable += phot
            relatedFilesTable += relatedFiles
            # SPECTRA
            spec, relatedFiles = self._parse_spectral_data(
                sourceContent, TNSId)
            specTable += spec
            relatedFilesTable += relatedFiles
    # SORT BY SEPARATION FROM THE SEARCH COORDINATES
    try:
        sourceTable = sorted(sourceTable, key=itemgetter(
            'separationArcsec'), reverse=False)
    except (KeyError, TypeError):
        # NAME/DATE-BASED SEARCHES HAVE NO SEPARATION COLUMN -- LEAVE THE
        # TABLE UNSORTED (was a bare except)
        pass
    self.log.info('completed the ``get`` method')
    return sourceTable, photoTable, specTable, relatedFilesTable
def _get_tns_search_results(
        self):
    """
    *query the tns and return the response*

    **Return:**
        - ``status_code`` -- the HTTP status code of the response
        - ``content`` -- the body of the response
        - ``url`` -- the full search URL that was queried

    **Raises:**
        - ``requests.exceptions.RequestException`` -- if the HTTP request fails
    """
    self.log.info('starting the ``_get_tns_search_results`` method')
    try:
        response = requests.get(
            url="http://wis-tns.weizmann.ac.il/search",
            params={
                "page": self.page,
                "ra": self.ra,
                "decl": self.dec,
                "radius": self.radiusArcsec,
                "name": self.name,
                "internal_name": self.internal_name,
                "date_start[date]": self.start,
                "date_end[date]": self.end,
                "num_page": self.batchSize,
                "display[redshift]": "1",
                "display[hostname]": "1",
                "display[host_redshift]": "1",
                "display[source_group_name]": "1",
                "display[internal_name]": "1",
                "display[spectra_count]": "1",
                "display[discoverymag]": "1",
                "display[discmagfilter]": "1",
                "display[discoverydate]": "1",
                "display[discoverer]": "1",
                "display[sources]": "1",
                "display[bibcode]": "1",
            },
        )
    except requests.exceptions.RequestException:
        # FIX: previously execution fell through to the `return` below and
        # died with a NameError because `response` was never bound; re-raise
        # so callers see the original connection error instead
        print('HTTP Request failed')
        raise
    self.log.info('completed the ``_get_tns_search_results`` method')
    return response.status_code, response.content, response.url
def _file_prefix(
self):
"""*Generate a file prefix based on the type of search for saving files to disk*
**Return:**
- ``prefix`` -- the file prefix
"""
self.log.info('starting the ``_file_prefix`` method')
if self.ra:
now = datetime.now()
prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_conesearch_")
elif self.name:
prefix = self.name + "_tns_conesearch_"
elif self.internal_name:
prefix = self.internal_name + "_tns_conesearch_"
elif self.discInLastDays:
discInLastDays = str(self.discInLastDays)
now = datetime.now()
prefix = now.strftime(
discInLastDays + "d_since_%Y%m%d_tns_conesearch_")
self.log.info('completed the ``_file_prefix`` method')
return prefix
def _parse_transient_rows(
self,
content,
count=False):
"""* parse transient rows from the TNS result page content*
**Key Arguments:**
- ``content`` -- the content from the TNS results page.
- ``count`` -- return only the number of rows
**Return:**
- ``transientRows``
"""
self.log.info('starting the ``_parse_transient_rows`` method')
regexForRow = r"""\n([^\n]*?<a href="/object/.*?)(?=\n[^\n]*?<a href="/object/|<\!\-\- /\.section, /#content \-\->)"""
if count:
# A SINGLE SOURCE BLOCK
matchedSources = re.findall(
regexForRow,
content,
flags=re.S # re.S
)
return len(matchedSources)
# A SINGLE SOURCE BLOCK
matchedSources = re.finditer(
regexForRow,
content,
flags=re.S # re.S
)
self.log.info('completed the ``_parse_transient_rows`` method')
return matchedSources
def _parse_discovery_information(
        self,
        content):
    """*parse discovery information from one row on the TNS results page*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page.

    **Return:**
        - ``discoveryData`` -- ordered dictionary of results for the first matched row
        - ``TNSId`` -- the unique TNS id for the transient
    """
    self.log.info('starting the ``_parse_discovery_information`` method')
    # ASTROCALC UNIT CONVERTER OBJECT
    converter = unit_conversion(
        log=self.log
    )
    matches = re.finditer(
        r"""<tr class="row-.*?"><td class="cell-id">(?P<tnsId>\d*?)</td><td class="cell-name"><a href="(?P<objectUrl>.*?)">(?P<TNSName>.*?)</a></td><td class="cell-.*?<td class="cell-ra">(?P<raSex>.*?)</td><td class="cell-decl">(?P<decSex>.*?)</td><td class="cell-ot_name">(?P<specType>.*?)</td><td class="cell-redshift">(?P<transRedshift>.*?)</td><td class="cell-hostname">(?P<hostName>.*?)</td><td class="cell-host_redshift">(?P<hostRedshift>.*?)</td><td class="cell-source_group_name">(?P<discSurvey>.*?)</td>.*?<td class="cell-internal_name">(<a.*?>)?(?P<discoveryName>.*?)(</a>)?</td>.*?<td class="cell-discoverymag">(?P<discMag>.*?)</td><td class="cell-disc_filter_name">(?P<discMagFilter>.*?)</td><td class="cell-discoverydate">(?P<discDate>.*?)</td><td class="cell-discoverer">(?P<sender>.*?)</td>.*?</tr>""",
        content,
        flags=0  # re.S
    )
    discoveryData = []
    for match in matches:
        row = match.groupdict()
        # CLEAN UP CELL VALUES -- `items()` works under both python 2 & 3
        # (`iteritems` is py2-only)
        for k, v in row.items():
            row[k] = v.strip()
            # FIX: test the *stripped* value so whitespace-only cells are
            # also nulled (the original tested the pre-strip length)
            if len(row[k]) == 0:
                row[k] = None
        # FIX: the cell value is a string, so `== 0` was never true --
        # compare against "0" as well
        if row["transRedshift"] in (0, "0"):
            row["transRedshift"] = None
        # TNS NAMES STARTING WITH A YEAR ARE SUPERNOVAE
        if row["TNSName"][0] in ["1", "2"]:
            row["TNSName"] = "SN" + row["TNSName"]
        row["objectUrl"] = "http://wis-tns.weizmann.ac.il" + \
            row["objectUrl"]
        # CONVERT COORDINATES TO DECIMAL DEGREES
        row["raDeg"] = converter.ra_sexegesimal_to_decimal(
            ra=row["raSex"]
        )
        row["decDeg"] = converter.dec_sexegesimal_to_decimal(
            dec=row["decSex"]
        )
        # IF THIS IS A COORDINATE SEARCH, ADD SEPARATION FROM
        # ORIGINAL QUERY COORDINATES
        if self.ra:
            # CALCULATE SEPARATION IN ARCSEC
            from astrocalc.coords import separations
            calculator = separations(
                log=self.log,
                ra1=self.ra,
                dec1=self.dec,
                ra2=row["raDeg"],
                dec2=row["decDeg"],
            )
            angularSeparation, north, east = calculator.get()
            row["separationArcsec"] = angularSeparation
            row["separationNorthArcsec"] = north
            row["separationEastArcsec"] = east
        # FALL BACK TO THE SENDER WHEN NO DISCOVERY SURVEY IS GIVEN
        if not row["discSurvey"]:
            row["survey"] = row["sender"]
        del row["sender"]
        del row["tnsId"]
        row["TNSName"] = row["TNSName"].replace(" ", "")
        row["TNSId"] = row["TNSName"].replace(
            "SN", "").replace("AT", "")
        TNSId = row["TNSId"]
        # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
        orow = collections.OrderedDict()
        keyOrder = ["TNSId", "TNSName", "discoveryName", "discSurvey", "raSex", "decSex", "raDeg", "decDeg",
                    "transRedshift", "specType", "discMag", "discMagFilter", "discDate", "objectUrl", "hostName", "hostRedshift", "separationArcsec", "separationNorthArcsec", "separationEastArcsec"]
        for k, v in row.items():
            if k not in keyOrder:
                keyOrder.append(k)
        for k in keyOrder:
            try:
                orow[k] = row[k]
            except:
                self.log.info(
                    "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                pass
        # FIX: append the ordered row (`orow`); previously the unordered
        # `row` was appended and the ordering work above was discarded --
        # this also matches what `_parse_spectral_data` does
        discoveryData.append(orow)
    self.log.info('completed the ``_parse_discovery_information`` method')
    # NOTE(review): assumes at least one row matched -- an empty page would
    # raise IndexError/NameError here, as in the original
    return discoveryData[0], TNSId
def _parse_related_files(
self,
content):
"""*parse the contents for related files URLs and comments*
**Key Arguments:**
- ``content`` -- the content to parse.
**Return:**
- ``relatedFiles`` -- a list of dictionaries of transient related files
"""
self.log.info('starting the ``_parse_related_files`` method')
relatedFilesList = re.finditer(
r"""<td class="cell-filename">.*?href="(?P<filepath>[^"]*).*?remarks">(?P<fileComment>[^<]*)""",
content,
flags=0 # re.S
)
relatedFiles = []
for f in relatedFilesList:
f = f.groupdict()
relatedFiles.append(f)
self.log.info('completed the ``_parse_related_files`` method')
return relatedFiles
def _parse_spectral_data(
self,
content,
TNSId):
"""*parse spectra data from a row in the tns results content*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page
- ``TNSId`` -- the tns id of the transient
**Return:**
- ``specData`` -- a list of dictionaries of the spectral data
- ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files
"""
self.log.info('starting the ``_parse_spectral_data`` method')
specData = []
relatedFilesTable = []
# CLASSIFICATION BLOCK
classBlock = re.search(
r"""<tr class=[^\n]*?Classification reportings.*$""",
content,
flags=re.S # re.S
)
if classBlock:
classBlock = classBlock.group()
reports = re.finditer(
r"""<tr class="row-[^"]*"><td class="cell-id">.*?</tbody>\s*</table>\s*</div></td> </tr>\s*</tbody>\s*</table>\s*</div></td> </tr>""",
classBlock,
flags=re.S #
)
relatedFiles = self._parse_related_files(classBlock)
for r in reports:
header = re.search(
r"""<tr class="row.*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<TNSuser>[^<]*).*?classifier_name">(?P<reporters>[^<]*).*?source_group_name">(?P<survey>[^<]*).*?-type">(?P<specType>[^<]*).*?-redshift">(?P<transRedshift>[^<]*).*?-related_files">(?P<relatedFiles>[^<]*).*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<sourceComment>[^<]*)</td>""",
r.group(),
flags=re.S # re.S
)
if not header:
continue
header = header.groupdict()
header["TNSId"] = TNSId
del header["reporters"]
del header["surveyGroup"]
del header["survey"]
if not self.comments:
del header['sourceComment']
else:
theseComments = header[
"sourceComment"].split("\n")
header["sourceComment"] = ""
for c in theseComments:
header["sourceComment"] += " " + c.strip()
header["sourceComment"] = header[
"sourceComment"].strip().replace('"', "'")[0:750]
spec = re.finditer(
r"""<tr class="class-results-.*?-obsdate">(?P<obsdate>[^<]*).*?-tel_inst">(?P<telescope>[^<]*).*?-exptime">(?P<exptime>[^<]*).*?-observer">(?P<sender>[^<]*).*?-reducer">(?P<reducer>[^<]*).*?-source_group_name">(?P<survey>[^<]*).*?-asciifile">(.*?<a href="(?P<filepath>[^"]*)".*?</a>)?.*?-fitsfile">(.*?<a href="(?P<fitsFilepath>[^"]*)".*?</a>)?.*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
r.group(),
flags=0 # re.S
)
filesAppended = False
for s in spec:
s = s.groupdict()
del s["sender"]
del s["surveyGroup"]
del s["reducer"]
if not self.comments:
del s["remarks"]
else:
s["remarks"] = s["remarks"].replace('"', "'")[0:750]
s.update(header)
if s["relatedFiles"] and filesAppended == False:
filesAppended = True
for f in relatedFiles:
# ORDER THE DICTIONARY FOR THIS ROW OF
# RESULTS
thisFile = collections.OrderedDict()
thisFile["TNSId"] = TNSId
thisFile["filename"] = f[
"filepath"].split("/")[-1]
thisFile["url"] = f["filepath"]
if self.comments:
thisFile["comment"] = f[
"fileComment"].replace("\n", " ").strip()
thisFile["dateObs"] = s["obsdate"]
thisFile["spec1phot2"] = 1
relatedFilesTable.append(thisFile)
for ffile in [s["filepath"], s["fitsFilepath"]]:
if ffile:
# ORDER THE DICTIONARY FOR THIS ROW OF
# RESULTS
thisFile = collections.OrderedDict()
thisFile["TNSId"] = TNSId
thisFile["filename"] = ffile.split(
"/")[-1]
thisFile["url"] = ffile
if self.comments:
thisFile["comment"] = ""
thisFile["dateObs"] = s["obsdate"]
thisFile["spec1phot2"] = 1
relatedFilesTable.append(thisFile)
del s["filepath"]
del s["fitsFilepath"]
del s["relatedFiles"]
# ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
orow = collections.OrderedDict()
keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
"telescope", "exptime", "reportAddedDate", "TNSuser"]
for k, v in s.iteritems():
if k not in keyOrder:
keyOrder.append(k)
for k in keyOrder:
try:
orow[k] = s[k]
except:
self.log.info(
"`%(k)s` not found in the source data for %(TNSId)s" % locals())
pass
specData.append(orow)
self.log.info('completed the ``_parse_spectral_data`` method')
return specData, relatedFilesTable
|
thespacedoctor/transientNamer
|
transientNamer/search.py
|
search._parse_related_files
|
python
|
def _parse_related_files(
self,
content):
self.log.info('starting the ``_parse_related_files`` method')
relatedFilesList = re.finditer(
r"""<td class="cell-filename">.*?href="(?P<filepath>[^"]*).*?remarks">(?P<fileComment>[^<]*)""",
content,
flags=0 # re.S
)
relatedFiles = []
for f in relatedFilesList:
f = f.groupdict()
relatedFiles.append(f)
self.log.info('completed the ``_parse_related_files`` method')
return relatedFiles
|
*parse the contents for related files URLs and comments*
**Key Arguments:**
- ``content`` -- the content to parse.
**Return:**
- ``relatedFiles`` -- a list of dictionaries of transient related files
|
train
|
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L1108-L1133
| null |
class search():
"""
*The worker class for the transient namer search module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``ra`` -- RA of the location being checked
- ``dec`` -- DEC of the location being searched
- ``radiusArcsec`` - the radius of the conesearch to perform against the TNS
- ``name`` -- name of the object to search the TNS for
- ``discInLastDays`` -- search the TNS for transient discovered in the last X days
- ``comments`` -- print the comments from the TNS, note these can be long making table outputs somewhat unreadable. Default *False*
**Usage:**
To initiate a search object to search the TNS via an object name (either TNS or survey names accepted):
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
name="Gaia16bbi"
)
or for a conesearch use something similar to:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
ra="06:50:36.74",
dec="+31:06:44.7",
radiusArcsec=5
)
Note the search method can accept coordinates in sexagesimal or decimal defree formats.
To list all new objects discovered in the last three weeks, then use:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
discInLastDays=21
)
"""
# Initialisation
def __init__(
        self,
        log,
        ra="",
        dec="",
        radiusArcsec="",
        name="",
        discInLastDays="",
        settings=False,
        comments=False
):
    """*Initialise the search object, run the TNS query and cache the four result sets*

    See the class docstring for the meaning of each argument.
    """
    self.log = log
    log.debug("instansiating a new 'search' object")
    self.settings = settings
    self.ra = ra
    self.dec = dec
    self.radiusArcsec = radiusArcsec
    self.comments = comments
    self.name = name
    self.internal_name = ""
    self.discInLastDays = discInLastDays
    # TNS RESULT-PAGE PAGINATION STATE (CONSUMED BY _query_tns /
    # _get_tns_search_results)
    self.page = 0
    self.batchSize = 1000
    # CREATE THE TIME-RANGE WINDOW TO SEARCH TNS
    if not discInLastDays:
        self.start = ""
        self.end = ""
    else:
        discInLastDays = int(discInLastDays)
        # END OF WINDOW IS TOMORROW SO TODAY'S DISCOVERIES ARE INCLUDED
        td = timedelta(days=1)
        end = datetime.now() + td
        self.end = end.strftime("%Y-%m-%d")
        td = timedelta(days=discInLastDays)
        start = datetime.now() - td
        self.start = start.strftime("%Y-%m-%d")
    # DETERMINE IF WE HAVE A TNS OR INTERAL SURVEY NAME
    # (TNS NAMES LOOK LIKE "SN2016asf"/"AT2016asf"/"2016asf"; ANYTHING ELSE
    # IS TREATED AS A SURVEY-INTERNAL NAME)
    if self.name:
        matchObject = re.match(r'^((SN|AT) ?)?(\d{4}\w{1,6})', self.name)
        if matchObject:
            self.name = matchObject.group(3)
        else:
            self.internal_name = self.name
            self.name = ""
    # DO THE SEARCH OF THE TNS AND COMPILE THE RESULTS INTO SEPARATE RESULT
    # SETS
    self.sourceResultsList, self.photResultsList, self.specResultsList, self.relatedFilesResultsList = self._query_tns()
    # WRAP EACH RESULT SET SO IT CAN BE RENDERED AS csv/json/yaml/markdown/
    # table/mysql
    self.sourceResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.sourceResultsList
    )
    self.photResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.photResultsList
    )
    self.specResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.specResultsList
    )
    self.relatedFilesResults = list_of_dictionaries(
        log=log,
        listOfDictionaries=self.relatedFilesResultsList
    )
    return None
@property
def sources(
self):
"""*The results of the search returned as a python list of dictionaries*
**Usage:**
.. code-block:: python
sources = tns.sources
"""
sourceResultsList = []
sourceResultsList[:] = [dict(l) for l in self.sourceResultsList]
return sourceResultsList
@property
def spectra(
self):
"""*The associated source spectral data*
**Usage:**
.. code-block:: python
sourceSpectra = tns.spectra
"""
specResultsList = []
specResultsList[:] = [dict(l) for l in self.specResultsList]
return specResultsList
@property
def files(
self):
"""*The associated source files*
**Usage:**
.. code-block:: python
sourceFiles = tns.files
"""
relatedFilesResultsList = []
relatedFilesResultsList[:] = [dict(l)
for l in self.relatedFilesResultsList]
return relatedFilesResultsList
@property
def photometry(
self):
"""*The associated source photometry*
**Usage:**
.. code-block:: python
sourcePhotometry = tns.photometry
"""
photResultsList = []
photResultsList[:] = [dict(l) for l in self.photResultsList]
return photResultsList
@property
def url(
self):
"""*The generated URL used for searching of the TNS*
**Usage:**
.. code-block:: python
searchURL = tns.url
"""
return self._searchURL
def csv(
self,
dirPath=None):
"""*Render the results in csv format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `csvSources` -- the top-level transient data
- `csvPhot` -- all photometry associated with the transients
- `csvSpec` -- all spectral data associated with the transients
- `csvFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in csv format:
.. code-block:: python
csvSources, csvPhot, csvSpec, csvFiles = tns.csv()
print csvSources
.. code-block:: text
TNSId,TNSName,discoveryName,discSurvey,raSex,decSex,raDeg,decDeg,transRedshift,specType,discMag,discMagFilter,discDate,objectUrl,hostName,hostRedshift,separationArcsec,separationNorthArcsec,separationEastArcsec
2016asf,SN2016asf,ASASSN-16cs,ASAS-SN,06:50:36.73,+31:06:45.36,102.6530,31.1126,0.021,SN Ia,17.1,V-Johnson,2016-03-06 08:09:36,http://wis-tns.weizmann.ac.il/object/2016asf,KUG 0647+311,,0.66,0.65,-0.13
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.csv("~/tns")
.. image:: https://i.imgur.com/BwwqMBg.png
:width: 800px
:alt: csv output
"""
if dirPath:
p = self._file_prefix()
csvSources = self.sourceResults.csv(
filepath=dirPath + "/" + p + "sources.csv")
csvPhot = self.photResults.csv(
filepath=dirPath + "/" + p + "phot.csv")
csvSpec = self.specResults.csv(
filepath=dirPath + "/" + p + "spec.csv")
csvFiles = self.relatedFilesResults.csv(
filepath=dirPath + "/" + p + "relatedFiles.csv")
else:
csvSources = self.sourceResults.csv()
csvPhot = self.photResults.csv()
csvSpec = self.specResults.csv()
csvFiles = self.relatedFilesResults.csv()
return csvSources, csvPhot, csvSpec, csvFiles
def json(
self,
dirPath=None):
"""*Render the results in json format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `jsonSources` -- the top-level transient data
- `jsonPhot` -- all photometry associated with the transients
- `jsonSpec` -- all spectral data associated with the transients
- `jsonFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in json format:
.. code-block:: python
jsonSources, jsonPhot, jsonSpec, jsonFiles = tns.json()
print jsonSources
.. code-block:: text
[
{
"TNSId": "2016asf",
"TNSName": "SN2016asf",
"decDeg": 31.1126,
"decSex": "+31:06:45.36",
"discDate": "2016-03-06 08:09:36",
"discMag": "17.1",
"discMagFilter": "V-Johnson",
"discSurvey": "ASAS-SN",
"discoveryName": "ASASSN-16cs",
"hostName": "KUG 0647+311",
"hostRedshift": null,
"objectUrl": "http://wis-tns.weizmann.ac.il/object/2016asf",
"raDeg": 102.65304166666667,
"raSex": "06:50:36.73",
"separationArcsec": "0.66",
"separationEastArcsec": "-0.13",
"separationNorthArcsec": "0.65",
"specType": "SN Ia",
"transRedshift": "0.021"
}
]
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.json("~/tns")
.. image:: https://i.imgur.com/wAHqARI.png
:width: 800px
:alt: json output
"""
if dirPath:
p = self._file_prefix()
jsonSources = self.sourceResults.json(
filepath=dirPath + "/" + p + "sources.json")
jsonPhot = self.photResults.json(
filepath=dirPath + "/" + p + "phot.json")
jsonSpec = self.specResults.json(
filepath=dirPath + "/" + p + "spec.json")
jsonFiles = self.relatedFilesResults.json(
filepath=dirPath + "/" + p + "relatedFiles.json")
else:
jsonSources = self.sourceResults.json()
jsonPhot = self.photResults.json()
jsonSpec = self.specResults.json()
jsonFiles = self.relatedFilesResults.json()
return jsonSources, jsonPhot, jsonSpec, jsonFiles
def yaml(
self,
dirPath=None):
"""*Render the results in yaml format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `yamlSources` -- the top-level transient data
- `yamlPhot` -- all photometry associated with the transients
- `yamlSpec` -- all spectral data associated with the transients
- `yamlFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in yaml format:
.. code-block:: python
yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml()
print yamlSources
.. code-block:: text
- TNSId: 2016asf
TNSName: SN2016asf
decDeg: 31.1126
decSex: '+31:06:45.36'
discDate: '2016-03-06 08:09:36'
discMag: '17.1'
discMagFilter: V-Johnson
discSurvey: ASAS-SN
discoveryName: ASASSN-16cs
hostName: KUG 0647+311
hostRedshift: null
objectUrl: http://wis-tns.weizmann.ac.il/object/2016asf
raDeg: 102.65304166666667
raSex: '06:50:36.73'
separationArcsec: '0.66'
separationEastArcsec: '-0.13'
separationNorthArcsec: '0.65'
specType: SN Ia
transRedshift: '0.021'
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.yaml("~/tns")
.. image:: https://i.imgur.com/ZpJIC6p.png
:width: 800px
:alt: yaml output
"""
if dirPath:
p = self._file_prefix()
yamlSources = self.sourceResults.yaml(
filepath=dirPath + "/" + p + "sources.yaml")
yamlPhot = self.photResults.yaml(
filepath=dirPath + "/" + p + "phot.yaml")
yamlSpec = self.specResults.yaml(
filepath=dirPath + "/" + p + "spec.yaml")
yamlFiles = self.relatedFilesResults.yaml(
filepath=dirPath + "/" + p + "relatedFiles.yaml")
else:
yamlSources = self.sourceResults.yaml()
yamlPhot = self.photResults.yaml()
yamlSpec = self.specResults.yaml()
yamlFiles = self.relatedFilesResults.yaml()
return yamlSources, yamlPhot, yamlSpec, yamlFiles
def markdown(
self,
dirPath=None):
"""*Render the results in markdown format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `markdownSources` -- the top-level transient data
- `markdownPhot` -- all photometry associated with the transients
- `markdownSpec` -- all spectral data associated with the transients
- `markdownFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in markdown table format:
.. code-block:: python
markdownSources, markdownPhot, markdownSpec, markdownFiles = tns.markdown()
print markdownSources
.. code-block:: text
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
|:---------|:-----------|:---------------|:------------|:-------------|:--------------|:----------|:---------|:---------------|:----------|:---------|:---------------|:---------------------|:----------------------------------------------|:--------------|:--------------|:------------------|:-----------------------|:----------------------|
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.markdown("~/tns")
.. image:: https://i.imgur.com/AYLBQoJ.png
:width: 800px
:alt: markdown output
"""
if dirPath:
p = self._file_prefix()
markdownSources = self.sourceResults.markdown(
filepath=dirPath + "/" + p + "sources.md")
markdownPhot = self.photResults.markdown(
filepath=dirPath + "/" + p + "phot.md")
markdownSpec = self.specResults.markdown(
filepath=dirPath + "/" + p + "spec.md")
markdownFiles = self.relatedFilesResults.markdown(
filepath=dirPath + "/" + p + "relatedFiles.md")
else:
markdownSources = self.sourceResults.markdown()
markdownPhot = self.photResults.markdown()
markdownSpec = self.specResults.markdown()
markdownFiles = self.relatedFilesResults.markdown()
return markdownSources, markdownPhot, markdownSpec, markdownFiles
def table(
self,
dirPath=None):
"""*Render the results as an ascii table*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `tableSources` -- the top-level transient data
- `tablePhot` -- all photometry associated with the transients
- `tableSpec` -- all spectral data associated with the transients
- `tableFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in ascii table format:
.. code-block:: python
tableSources, tablePhot, tableSpec, tableFiles = tns.table()
print tableSources
.. code-block:: text
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.table("~/tns")
.. image:: https://i.imgur.com/m09M0ho.png
:width: 800px
:alt: ascii files
"""
if dirPath:
p = self._file_prefix()
tableSources = self.sourceResults.table(
filepath=dirPath + "/" + p + "sources.ascii")
tablePhot = self.photResults.table(
filepath=dirPath + "/" + p + "phot.ascii")
tableSpec = self.specResults.table(
filepath=dirPath + "/" + p + "spec.ascii")
tableFiles = self.relatedFilesResults.table(
filepath=dirPath + "/" + p + "relatedFiles.ascii")
else:
tableSources = self.sourceResults.table()
tablePhot = self.photResults.table()
tableSpec = self.specResults.table()
tableFiles = self.relatedFilesResults.table()
return tableSources, tablePhot, tableSpec, tableFiles
def mysql(
        self,
        tableNamePrefix="TNS",
        dirPath=None):
    """*Render the results as MySQL Insert statements*

    **Key Arguments:**
        - ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `mysqlSources` -- the top-level transient data
        - `mysqlPhot` -- all photometry associated with the transients
        - `mysqlSpec` -- all spectral data associated with the transients
        - `mysqlFiles` -- all files associated with the matched transients found on the tns

    **Usage:**

        To render the results in mysql insert format:

        .. code-block:: python

            mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")
            print mysqlSources

        .. code-block:: text

            INSERT INTO `TNS_sources` (TNSId,TNSName,dateCreated,...) VALUES ("2016asf" ,"SN2016asf" ,...) ON DUPLICATE KEY UPDATE ... ;

        You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.

        .. code-block:: python

            tns.mysql("TNS", "~/tns")

        NOTE: the `CREATE TABLE` statements are only generated when
        ``dirPath`` is given; the in-memory render contains the INSERTs
        alone.
    """
    if dirPath:
        # FILE OUTPUT REQUESTED - PREFIX EVERY FILE WITH THE SEARCH TYPE
        p = self._file_prefix()

        # SOURCES TABLE SCHEMA - `TNSId` IS THE UNIQUE KEY TYING ALL FOUR
        # TABLES TOGETHER; `%(tableNamePrefix)s` IS FILLED IN VIA locals()
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_sources` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(20) NOT NULL,
  `TNSName` varchar(20) DEFAULT NULL,
  `dateCreated` datetime DEFAULT NULL,
  `decDeg` double DEFAULT NULL,
  `decSex` varchar(45) DEFAULT NULL,
  `discDate` datetime DEFAULT NULL,
  `discMag` double DEFAULT NULL,
  `discMagFilter` varchar(45) DEFAULT NULL,
  `discSurvey` varchar(100) DEFAULT NULL,
  `discoveryName` varchar(100) DEFAULT NULL,
  `objectUrl` varchar(200) DEFAULT NULL,
  `raDeg` double DEFAULT NULL,
  `raSex` varchar(45) DEFAULT NULL,
  `specType` varchar(100) DEFAULT NULL,
  `transRedshift` double DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `hostName` VARCHAR(100) NULL DEFAULT NULL,
  `hostRedshift` DOUBLE NULL DEFAULT NULL,
  `survey` VARCHAR(100) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid` (`TNSId`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources", filepath=dirPath + "/" + p + "sources.csv".replace("sources.csv", "sources.sql"), createStatement=createStatement) if False else self.sourceResults.mysql(
            tableNamePrefix + "_sources", filepath=dirPath + "/" + p + "sources.sql", createStatement=createStatement)

        # PHOTOMETRY TABLE SCHEMA - ONE ROW PER MEASUREMENT, UNIQUE ON
        # (TNSId, survey, obsdate)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_photometry` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(20) NOT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `exptime` double DEFAULT NULL,
  `filter` varchar(100) DEFAULT NULL,
  `limitingMag` tinyint(4) DEFAULT NULL,
  `mag` double DEFAULT NULL,
  `magErr` double DEFAULT NULL,
  `magUnit` varchar(100) DEFAULT NULL,
  `objectName` varchar(100) DEFAULT NULL,
  `obsdate` datetime DEFAULT NULL,
  `reportAddedDate` datetime DEFAULT NULL,
  `suggestedType` varchar(100) DEFAULT NULL,
  `survey` varchar(100) DEFAULT NULL,
  `telescope` varchar(100) DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `remarks` VARCHAR(800) NULL DEFAULT NULL,
  `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
  UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),
  UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlPhot = self.photResults.mysql(
            tableNamePrefix + "_photometry", filepath=dirPath + "/" + p + "phot.sql", createStatement=createStatement)

        # SPECTRA TABLE SCHEMA - ONE ROW PER CLASSIFICATION SPECTRUM
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_spectra` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(45) NOT NULL,
  `TNSuser` varchar(45) DEFAULT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `exptime` double DEFAULT NULL,
  `obsdate` datetime DEFAULT NULL,
  `reportAddedDate` datetime DEFAULT NULL,
  `specType` varchar(100) DEFAULT NULL,
  `survey` varchar(100) DEFAULT NULL,
  `telescope` varchar(100) DEFAULT NULL,
  `transRedshift` double DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `remarks` VARCHAR(800) NULL DEFAULT NULL,
  `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
  UNIQUE KEY `u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlSpec = self.specResults.mysql(
            tableNamePrefix + "_spectra", filepath=dirPath + "/" + p + "spec.sql", createStatement=createStatement)

        # RELATED-FILES TABLE SCHEMA - `spec1phot2` FLAGS WHETHER THE FILE
        # BELONGS TO A SPECTRUM (1) OR A PHOTOMETRY REPORT (2)
        createStatement = """
CREATE TABLE `%(tableNamePrefix)s_files` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `TNSId` varchar(100) NOT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `dateObs` datetime DEFAULT NULL,
  `filename` varchar(200) DEFAULT NULL,
  `spec1phot2` tinyint(4) DEFAULT NULL,
  `url` varchar(800) DEFAULT NULL,
  `updated` tinyint(4) DEFAULT '0',
  `dateLastModified` datetime DEFAULT NULL,
  `comment` VARCHAR(800) NULL DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `tnsid_url` (`TNSId`,`url`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files", filepath=dirPath + "/" + p + "relatedFiles.sql", createStatement=createStatement)
    else:
        # IN-MEMORY RENDER ONLY - NO CREATE STATEMENTS, NO FILES WRITTEN
        mysqlSources = self.sourceResults.mysql(
            tableNamePrefix + "_sources")
        mysqlPhot = self.photResults.mysql(tableNamePrefix + "_photometry")
        mysqlSpec = self.specResults.mysql(tableNamePrefix + "_spectra")
        mysqlFiles = self.relatedFilesResults.mysql(
            tableNamePrefix + "_files")

    return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles
def _query_tns(self):
"""
*determine how to query the TNS, send query and parse the results*
**Return:**
- ``results`` -- a list of dictionaries (one dictionary for each result set returned from the TNS)
"""
self.log.info('starting the ``get`` method')
sourceTable = []
photoTable = []
specTable = []
relatedFilesTable = []
# THIS stop IS TO KEEP TRACK OF THE TNS PAGINATION IF MANY RESULT PAGES
# ARE RETURNED
stop = False
sourceCount = 0
while not stop:
status_code, content, self._searchURL = self._get_tns_search_results()
if status_code != 200:
self.log.error(
'cound not get the search reuslts from the TNS, HTML error code %(status_code)s ' % locals())
return None
if "No results found" in content:
print "No results found"
return sourceTable, photoTable, specTable, relatedFilesTable
if self._parse_transient_rows(content, True) < self.batchSize:
stop = True
else:
self.page += 1
thisPage = self.page
print "Downloaded %(thisPage)s page(s) from the TNS. %(sourceCount)s transients parsed so far." % locals()
sourceCount += self.batchSize
print "\t" + self._searchURL
timesleep.sleep(1)
# PARSE ALL ROWS RETURNED
for transientRow in self._parse_transient_rows(content):
# TOP LEVEL DISCOVERY CONTENT
sourceContent = transientRow.group()
discInfo, TNSId = self._parse_discovery_information(
sourceContent)
sourceTable.append(discInfo)
# PHOTOMETERY
phot, relatedFiles = self._parse_photometry_data(
sourceContent, TNSId)
photoTable += phot
relatedFilesTable += relatedFiles
# SPECTRA
spec, relatedFiles = self._parse_spectral_data(
sourceContent, TNSId)
specTable += spec
relatedFilesTable += relatedFiles
# SORT BY SEPARATION FROM THE SEARCH COORDINATES
try:
sourceTable = sorted(sourceTable, key=itemgetter(
'separationArcsec'), reverse=False)
except:
pass
self.log.info('completed the ``get`` method')
return sourceTable, photoTable, specTable, relatedFilesTable
def _get_tns_search_results(
        self):
    """
    *query the tns and return the response*

    **Return:**
        - ``status_code`` -- HTTP status code of the response (``None`` if the request failed)
        - ``content`` -- body of the response (empty string if the request failed)
        - ``url`` -- the fully-resolved search URL (empty string if the request failed)
    """
    self.log.info('starting the ``_get_tns_search_results`` method')

    try:
        response = requests.get(
            url="http://wis-tns.weizmann.ac.il/search",
            params={
                "page": self.page,
                "ra": self.ra,
                "decl": self.dec,
                "radius": self.radiusArcsec,
                "name": self.name,
                "internal_name": self.internal_name,
                "date_start[date]": self.start,
                "date_end[date]": self.end,
                "num_page": self.batchSize,
                "display[redshift]": "1",
                "display[hostname]": "1",
                "display[host_redshift]": "1",
                "display[source_group_name]": "1",
                "display[internal_name]": "1",
                "display[spectra_count]": "1",
                "display[discoverymag]": "1",
                "display[discmagfilter]": "1",
                "display[discoverydate]": "1",
                "display[discoverer]": "1",
                "display[sources]": "1",
                "display[bibcode]": "1",
            },
        )
    except requests.exceptions.RequestException:
        # BUGFIX: previously execution fell through to the `return` below
        # and raised a NameError on the undefined `response`. Return a
        # sentinel triple instead so the caller's status-code check can
        # fail gracefully.
        print('HTTP Request failed')
        return None, "", ""

    self.log.info('completed the ``_get_tns_search_results`` method')
    return response.status_code, response.content, response.url
def _file_prefix(
self):
"""*Generate a file prefix based on the type of search for saving files to disk*
**Return:**
- ``prefix`` -- the file prefix
"""
self.log.info('starting the ``_file_prefix`` method')
if self.ra:
now = datetime.now()
prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_conesearch_")
elif self.name:
prefix = self.name + "_tns_conesearch_"
elif self.internal_name:
prefix = self.internal_name + "_tns_conesearch_"
elif self.discInLastDays:
discInLastDays = str(self.discInLastDays)
now = datetime.now()
prefix = now.strftime(
discInLastDays + "d_since_%Y%m%d_tns_conesearch_")
self.log.info('completed the ``_file_prefix`` method')
return prefix
def _parse_transient_rows(
self,
content,
count=False):
"""* parse transient rows from the TNS result page content*
**Key Arguments:**
- ``content`` -- the content from the TNS results page.
- ``count`` -- return only the number of rows
**Return:**
- ``transientRows``
"""
self.log.info('starting the ``_parse_transient_rows`` method')
regexForRow = r"""\n([^\n]*?<a href="/object/.*?)(?=\n[^\n]*?<a href="/object/|<\!\-\- /\.section, /#content \-\->)"""
if count:
# A SINGLE SOURCE BLOCK
matchedSources = re.findall(
regexForRow,
content,
flags=re.S # re.S
)
return len(matchedSources)
# A SINGLE SOURCE BLOCK
matchedSources = re.finditer(
regexForRow,
content,
flags=re.S # re.S
)
self.log.info('completed the ``_parse_transient_rows`` method')
return matchedSources
def _parse_discovery_information(
        self,
        content):
    """* parse discovery information from one row on the TNS results page*

    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page.

    **Return:**
        - ``discoveryData`` -- an ordered dictionary of the discovery-level results
        - ``TNSId`` -- the unique TNS id for the transient
    """
    self.log.info('starting the ``_parse_discovery_information`` method')

    # ASTROCALC UNIT CONVERTER OBJECT
    converter = unit_conversion(
        log=self.log
    )

    matches = re.finditer(
        r"""<tr class="row-.*?"><td class="cell-id">(?P<tnsId>\d*?)</td><td class="cell-name"><a href="(?P<objectUrl>.*?)">(?P<TNSName>.*?)</a></td><td class="cell-.*?<td class="cell-ra">(?P<raSex>.*?)</td><td class="cell-decl">(?P<decSex>.*?)</td><td class="cell-ot_name">(?P<specType>.*?)</td><td class="cell-redshift">(?P<transRedshift>.*?)</td><td class="cell-hostname">(?P<hostName>.*?)</td><td class="cell-host_redshift">(?P<hostRedshift>.*?)</td><td class="cell-source_group_name">(?P<discSurvey>.*?)</td>.*?<td class="cell-internal_name">(<a.*?>)?(?P<discoveryName>.*?)(</a>)?</td>.*?<td class="cell-discoverymag">(?P<discMag>.*?)</td><td class="cell-disc_filter_name">(?P<discMagFilter>.*?)</td><td class="cell-discoverydate">(?P<discDate>.*?)</td><td class="cell-discoverer">(?P<sender>.*?)</td>.*?</tr>""",
        content,
        flags=0  # re.S
    )

    discoveryData = []
    for match in matches:
        row = match.groupdict()
        # STRIP WHITESPACE AND NULL EMPTY CELLS
        for k, v in row.items():
            row[k] = v.strip()
            # NOTE(review): the emptiness test uses the un-stripped value,
            # so whitespace-only cells survive as '' rather than None --
            # presumed intentional, confirm before changing
            if len(v) == 0:
                row[k] = None
        # NOTE(review): transRedshift is a string here, so this comparison
        # with the integer 0 can never be True -- confirm intent
        if row["transRedshift"] == 0:
            row["transRedshift"] = None
        # TNS NAMES STARTING WITH A YEAR ARE SUPERNOVAE
        if row["TNSName"][0] in ["1", "2"]:
            row["TNSName"] = "SN" + row["TNSName"]
        row["objectUrl"] = "http://wis-tns.weizmann.ac.il" + \
            row["objectUrl"]

        # CONVERT COORDINATES TO DECIMAL DEGREES
        row["raDeg"] = converter.ra_sexegesimal_to_decimal(
            ra=row["raSex"]
        )
        row["decDeg"] = converter.dec_sexegesimal_to_decimal(
            dec=row["decSex"]
        )

        # IF THIS IS A COORDINATE SEARCH, ADD SEPARATION FROM
        # ORIGINAL QUERY COORDINATES
        if self.ra:
            # CALCULATE SEPARATION IN ARCSEC
            from astrocalc.coords import separations
            calculator = separations(
                log=self.log,
                ra1=self.ra,
                dec1=self.dec,
                ra2=row["raDeg"],
                dec2=row["decDeg"],
            )
            angularSeparation, north, east = calculator.get()
            row["separationArcsec"] = angularSeparation
            row["separationNorthArcsec"] = north
            row["separationEastArcsec"] = east

        # FALL BACK TO THE SENDER WHEN NO DISCOVERY SURVEY IS GIVEN
        if not row["discSurvey"]:
            row["survey"] = row["sender"]
        del row["sender"]
        del row["tnsId"]
        row["TNSName"] = row["TNSName"].replace(" ", "")
        row["TNSId"] = row["TNSName"].replace(
            "SN", "").replace("AT", "")
        TNSId = row["TNSId"]

        # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
        orow = collections.OrderedDict()
        keyOrder = ["TNSId", "TNSName", "discoveryName", "discSurvey", "raSex", "decSex", "raDeg", "decDeg",
                    "transRedshift", "specType", "discMag", "discMagFilter", "discDate", "objectUrl", "hostName", "hostRedshift", "separationArcsec", "separationNorthArcsec", "separationEastArcsec"]
        for k in row:
            if k not in keyOrder:
                keyOrder.append(k)
        for k in keyOrder:
            try:
                orow[k] = row[k]
            except:
                self.log.info(
                    "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                pass
        # BUGFIX: append the ORDERED dictionary (previously the unordered
        # `row` was appended, discarding the ordering work above and
        # diverging from _parse_photometry_data which appends `orow`)
        discoveryData.append(orow)

    self.log.info('completed the ``_parse_discovery_information`` method')
    return discoveryData[0], TNSId
def _parse_photometry_data(
self,
content,
TNSId):
"""*parse photometry data from a row in the tns results content*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page
- ``TNSId`` -- the tns id of the transient
**Return:**
- ``photData`` -- a list of dictionaries of the photometry data
- ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files
"""
self.log.info('starting the ``_parse_photometry_data`` method')
photData = []
relatedFilesTable = []
# AT REPORT BLOCK
ATBlock = re.search(
r"""<tr class=[^\n]*?AT reportings.*?(?=<tr class=[^\n]*?Classification reportings|$)""",
content,
flags=re.S # re.S
)
if ATBlock:
ATBlock = ATBlock.group()
reports = re.finditer(
r"""<tr class="row-[^"]*"><td class="cell-id">.*?</table>""",
ATBlock,
flags=re.S # re.S
)
relatedFiles = self._parse_related_files(ATBlock)
for r in reports:
header = re.search(
r"""<tr class="row[^"]*".*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<sender>[^<]*).*?reporter_name">(?P<reporters>[^<]*).*?source_group_name">(?P<surveyGroup>[^<]*).*?ra">(?P<ra>[^<]*).*?decl">(?P<dec>[^<]*).*?discovery_date">(?P<obsDate>[^<]*).*?flux">(?P<mag>[^<]*).*?filter_name">(?P<magFilter>[^<]*).*?related_files">(?P<relatedFiles>[^<]*).*?type_name">(?P<suggestedType>[^<]*).*?hostname">(?P<hostName>[^<]*).*?host_redshift">(?P<hostRedshift>[^<]*).*?internal_name">(?P<objectName>[^<]*).*?groups">(?P<survey>[^<]*).*?remarks">(?P<sourceComment>[^<]*)""",
r.group(),
flags=0 # re.S
)
try:
header = header.groupdict()
except:
print r.group()
header["TNSId"] = TNSId
del header["reporters"]
del header["surveyGroup"]
del header["hostName"]
del header["hostRedshift"]
del header["mag"]
del header["magFilter"]
del header["obsDate"]
del header["ra"]
del header["dec"]
if not self.comments:
del header['sourceComment']
else:
theseComments = header[
"sourceComment"].split("\n")
header["sourceComment"] = ""
for c in theseComments:
header["sourceComment"] += " " + c.strip()
header["sourceComment"] = header[
"sourceComment"].strip().replace('"', "'")[0:750]
phot = re.finditer(
r"""<tr class="row\-[^"]*".*?obsdate">(?P<obsdate>[^<]*).*?flux">(?P<mag>[^<]*).*?fluxerr">(?P<magErr>[^<]*).*?limflux">(?P<limitingMag>[^<]*).*?unit_name">(?P<magUnit>[^<]*).*?filter_name">(?P<filter>[^<]*).*?tel_inst">(?P<telescope>[^<]*).*?exptime">(?P<exptime>[^<]*).*?observer">(?P<observer>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
r.group(),
flags=0 # re.S
)
filesAppended = False
for p in phot:
p = p.groupdict()
del p["observer"]
if p["limitingMag"] and not p["mag"]:
p["mag"] = p["limitingMag"]
p["limitingMag"] = 1
p["remarks"] = p["remarks"].replace(
"[Last non detection]", "")
else:
p["limitingMag"] = 0
if not self.comments:
del p["remarks"]
p.update(header)
if p["relatedFiles"] and filesAppended == False:
filesAppended = True
for f in relatedFiles:
# ORDER THE DICTIONARY FOR THIS ROW OF
# RESULTS
thisFile = collections.OrderedDict()
thisFile["TNSId"] = TNSId
thisFile["filename"] = f[
"filepath"].split("/")[-1]
thisFile["url"] = f["filepath"]
if self.comments:
thisFile["comment"] = f[
"fileComment"].replace("\n", " ").strip().replace('"', "'")[0:750]
thisFile["dateObs"] = p["obsdate"]
thisFile["spec1phot2"] = 2
relatedFilesTable.append(thisFile)
if not p["survey"] and not p["objectName"]:
p["survey"] = p["sender"]
del p["relatedFiles"]
del p["sender"]
# ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
orow = collections.OrderedDict()
keyOrder = ["TNSId", "survey", "obsdate", "filter", "limitingMag", "mag", "magErr",
"magUnit", "suggestedType", "telescope", "exptime", "reportAddedDate"]
for k, v in p.iteritems():
if k not in keyOrder:
keyOrder.append(k)
for k in keyOrder:
try:
orow[k] = p[k]
except:
self.log.info(
"`%(k)s` not found in the source data for %(TNSId)s" % locals())
pass
photData.append(orow)
self.log.info('completed the ``_parse_photometry_data`` method')
return photData, relatedFilesTable
    def _parse_spectral_data(
            self,
            content,
            TNSId):
        """*parse spectra data from a row in the tns results content*

        **Key Arguments:**
            - ``content`` -- a table row from the TNS results page
            - ``TNSId`` -- the tns id of the transient

        **Return:**
            - ``specData`` -- a list of dictionaries of the spectral data
            - ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files
        """
        self.log.info('starting the ``_parse_spectral_data`` method')

        specData = []
        relatedFilesTable = []

        # CLASSIFICATION BLOCK
        # ISOLATE THE 'Classification reportings' SECTION OF THE SOURCE ROW -
        # EVERYTHING FROM THAT HEADER TO THE END OF THE CONTENT
        classBlock = re.search(
            r"""<tr class=[^\n]*?Classification reportings.*$""",
            content,
            flags=re.S  # re.S
        )

        if classBlock:
            classBlock = classBlock.group()

            # ONE MATCH PER CLASSIFICATION REPORT SUBMITTED TO THE TNS FOR
            # THIS TRANSIENT
            reports = re.finditer(
                r"""<tr class="row-[^"]*"><td class="cell-id">.*?</tbody>\s*</table>\s*</div></td> </tr>\s*</tbody>\s*</table>\s*</div></td> </tr>""",
                classBlock,
                flags=re.S  #
            )

            # FILES ATTACHED AT THE REPORT LEVEL (AS OPPOSED TO THE PER-SPECTRUM
            # ASCII/FITS FILES PARSED BELOW)
            relatedFiles = self._parse_related_files(classBlock)

            for r in reports:
                # REPORT-LEVEL METADATA: DATE RECEIVED, TNS USER, SPEC TYPE,
                # REDSHIFT, RELATED-FILES FLAG, COMMENT ...
                header = re.search(
                    r"""<tr class="row.*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<TNSuser>[^<]*).*?classifier_name">(?P<reporters>[^<]*).*?source_group_name">(?P<survey>[^<]*).*?-type">(?P<specType>[^<]*).*?-redshift">(?P<transRedshift>[^<]*).*?-related_files">(?P<relatedFiles>[^<]*).*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<sourceComment>[^<]*)</td>""",
                    r.group(),
                    flags=re.S  # re.S
                )
                if not header:
                    continue
                header = header.groupdict()
                header["TNSId"] = TNSId
                # DROP FIELDS NOT WANTED IN THE FINAL RESULT SET
                del header["reporters"]
                del header["surveyGroup"]
                del header["survey"]

                if not self.comments:
                    del header['sourceComment']
                else:
                    # FLATTEN MULTI-LINE COMMENTS TO ONE LINE, SWAP DOUBLE
                    # QUOTES FOR SINGLE AND TRUNCATE TO 750 CHARACTERS
                    theseComments = header[
                        "sourceComment"].split("\n")
                    header["sourceComment"] = ""
                    for c in theseComments:
                        header["sourceComment"] += " " + c.strip()
                    header["sourceComment"] = header[
                        "sourceComment"].strip().replace('"', "'")[0:750]

                # ONE MATCH PER SPECTRUM ROW WITHIN THIS CLASSIFICATION REPORT
                spec = re.finditer(
                    r"""<tr class="class-results-.*?-obsdate">(?P<obsdate>[^<]*).*?-tel_inst">(?P<telescope>[^<]*).*?-exptime">(?P<exptime>[^<]*).*?-observer">(?P<sender>[^<]*).*?-reducer">(?P<reducer>[^<]*).*?-source_group_name">(?P<survey>[^<]*).*?-asciifile">(.*?<a href="(?P<filepath>[^"]*)".*?</a>)?.*?-fitsfile">(.*?<a href="(?P<fitsFilepath>[^"]*)".*?</a>)?.*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                    r.group(),
                    flags=0  # re.S
                )

                filesAppended = False
                for s in spec:
                    s = s.groupdict()
                    del s["sender"]
                    del s["surveyGroup"]
                    del s["reducer"]
                    if not self.comments:
                        del s["remarks"]
                    else:
                        s["remarks"] = s["remarks"].replace('"', "'")[0:750]
                    # MERGE IN THE REPORT-LEVEL METADATA
                    s.update(header)

                    # APPEND THE REPORT-LEVEL RELATED FILES ONLY ONCE PER
                    # REPORT, EVEN IF THE REPORT CONTAINS SEVERAL SPECTRA
                    if s["relatedFiles"] and filesAppended == False:
                        filesAppended = True
                        for f in relatedFiles:

                            # ORDER THE DICTIONARY FOR THIS ROW OF
                            # RESULTS
                            thisFile = collections.OrderedDict()
                            thisFile["TNSId"] = TNSId
                            thisFile["filename"] = f[
                                "filepath"].split("/")[-1]
                            thisFile["url"] = f["filepath"]
                            if self.comments:
                                thisFile["comment"] = f[
                                    "fileComment"].replace("\n", " ").strip()
                            thisFile["dateObs"] = s["obsdate"]
                            thisFile["spec1phot2"] = 1
                            relatedFilesTable.append(thisFile)

                    # THE SPECTRUM FILES THEMSELVES - ASCII AND/OR FITS
                    # VERSIONS MAY BE PRESENT
                    for ffile in [s["filepath"], s["fitsFilepath"]]:
                        if ffile:
                            # ORDER THE DICTIONARY FOR THIS ROW OF
                            # RESULTS
                            thisFile = collections.OrderedDict()
                            thisFile["TNSId"] = TNSId
                            thisFile["filename"] = ffile.split(
                                "/")[-1]
                            thisFile["url"] = ffile
                            if self.comments:
                                thisFile["comment"] = ""
                            thisFile["dateObs"] = s["obsdate"]
                            thisFile["spec1phot2"] = 1
                            relatedFilesTable.append(thisFile)

                    del s["filepath"]
                    del s["fitsFilepath"]
                    del s["relatedFiles"]

                    # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                    orow = collections.OrderedDict()
                    keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
                                "telescope", "exptime", "reportAddedDate", "TNSuser"]
                    # APPEND ANY EXTRA KEYS NOT IN THE PREFERRED ORDERING
                    for k, v in s.iteritems():
                        if k not in keyOrder:
                            keyOrder.append(k)
                    for k in keyOrder:
                        try:
                            orow[k] = s[k]
                        except:
                            # KEY IN THE PREFERRED ORDERING BUT MISSING FROM
                            # THIS ROW - LOG AND SKIP
                            self.log.info(
                                "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                            pass
                    specData.append(orow)

        self.log.info('completed the ``_parse_spectral_data`` method')
        return specData, relatedFilesTable
|
thespacedoctor/transientNamer
|
transientNamer/search.py
|
search._parse_spectral_data
|
python
|
def _parse_spectral_data(
self,
content,
TNSId):
self.log.info('starting the ``_parse_spectral_data`` method')
specData = []
relatedFilesTable = []
# CLASSIFICATION BLOCK
classBlock = re.search(
r"""<tr class=[^\n]*?Classification reportings.*$""",
content,
flags=re.S # re.S
)
if classBlock:
classBlock = classBlock.group()
reports = re.finditer(
r"""<tr class="row-[^"]*"><td class="cell-id">.*?</tbody>\s*</table>\s*</div></td> </tr>\s*</tbody>\s*</table>\s*</div></td> </tr>""",
classBlock,
flags=re.S #
)
relatedFiles = self._parse_related_files(classBlock)
for r in reports:
header = re.search(
r"""<tr class="row.*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<TNSuser>[^<]*).*?classifier_name">(?P<reporters>[^<]*).*?source_group_name">(?P<survey>[^<]*).*?-type">(?P<specType>[^<]*).*?-redshift">(?P<transRedshift>[^<]*).*?-related_files">(?P<relatedFiles>[^<]*).*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<sourceComment>[^<]*)</td>""",
r.group(),
flags=re.S # re.S
)
if not header:
continue
header = header.groupdict()
header["TNSId"] = TNSId
del header["reporters"]
del header["surveyGroup"]
del header["survey"]
if not self.comments:
del header['sourceComment']
else:
theseComments = header[
"sourceComment"].split("\n")
header["sourceComment"] = ""
for c in theseComments:
header["sourceComment"] += " " + c.strip()
header["sourceComment"] = header[
"sourceComment"].strip().replace('"', "'")[0:750]
spec = re.finditer(
r"""<tr class="class-results-.*?-obsdate">(?P<obsdate>[^<]*).*?-tel_inst">(?P<telescope>[^<]*).*?-exptime">(?P<exptime>[^<]*).*?-observer">(?P<sender>[^<]*).*?-reducer">(?P<reducer>[^<]*).*?-source_group_name">(?P<survey>[^<]*).*?-asciifile">(.*?<a href="(?P<filepath>[^"]*)".*?</a>)?.*?-fitsfile">(.*?<a href="(?P<fitsFilepath>[^"]*)".*?</a>)?.*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
r.group(),
flags=0 # re.S
)
filesAppended = False
for s in spec:
s = s.groupdict()
del s["sender"]
del s["surveyGroup"]
del s["reducer"]
if not self.comments:
del s["remarks"]
else:
s["remarks"] = s["remarks"].replace('"', "'")[0:750]
s.update(header)
if s["relatedFiles"] and filesAppended == False:
filesAppended = True
for f in relatedFiles:
# ORDER THE DICTIONARY FOR THIS ROW OF
# RESULTS
thisFile = collections.OrderedDict()
thisFile["TNSId"] = TNSId
thisFile["filename"] = f[
"filepath"].split("/")[-1]
thisFile["url"] = f["filepath"]
if self.comments:
thisFile["comment"] = f[
"fileComment"].replace("\n", " ").strip()
thisFile["dateObs"] = s["obsdate"]
thisFile["spec1phot2"] = 1
relatedFilesTable.append(thisFile)
for ffile in [s["filepath"], s["fitsFilepath"]]:
if ffile:
# ORDER THE DICTIONARY FOR THIS ROW OF
# RESULTS
thisFile = collections.OrderedDict()
thisFile["TNSId"] = TNSId
thisFile["filename"] = ffile.split(
"/")[-1]
thisFile["url"] = ffile
if self.comments:
thisFile["comment"] = ""
thisFile["dateObs"] = s["obsdate"]
thisFile["spec1phot2"] = 1
relatedFilesTable.append(thisFile)
del s["filepath"]
del s["fitsFilepath"]
del s["relatedFiles"]
# ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
orow = collections.OrderedDict()
keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
"telescope", "exptime", "reportAddedDate", "TNSuser"]
for k, v in s.iteritems():
if k not in keyOrder:
keyOrder.append(k)
for k in keyOrder:
try:
orow[k] = s[k]
except:
self.log.info(
"`%(k)s` not found in the source data for %(TNSId)s" % locals())
pass
specData.append(orow)
self.log.info('completed the ``_parse_spectral_data`` method')
return specData, relatedFilesTable
|
*parse spectra data from a row in the tns results content*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page
- ``TNSId`` -- the tns id of the transient
**Return:**
- ``specData`` -- a list of dictionaries of the spectral data
- ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files
|
train
|
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L1135-L1273
| null |
class search():
"""
*The worker class for the transient namer search module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``ra`` -- RA of the location being checked
- ``dec`` -- DEC of the location being searched
- ``radiusArcsec`` - the radius of the conesearch to perform against the TNS
- ``name`` -- name of the object to search the TNS for
- ``discInLastDays`` -- search the TNS for transient discovered in the last X days
- ``comments`` -- print the comments from the TNS, note these can be long making table outputs somewhat unreadable. Default *False*
**Usage:**
To initiate a search object to search the TNS via an object name (either TNS or survey names accepted):
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
name="Gaia16bbi"
)
or for a conesearch use something similar to:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
ra="06:50:36.74",
dec="+31:06:44.7",
radiusArcsec=5
)
    Note the search method can accept coordinates in sexagesimal or decimal degree formats.
To list all new objects discovered in the last three weeks, then use:
.. code-block:: python
from transientNamer import search
tns = search(
log=log,
discInLastDays=21
)
"""
# Initialisation
    def __init__(
            self,
            log,
            ra="",
            dec="",
            radiusArcsec="",
            name="",
            discInLastDays="",
            settings=False,
            comments=False
    ):
        """*Store the search parameters, query the TNS and build the four result sets*

        See the class docstring for the meaning of each argument. The TNS is
        queried immediately on instantiation; results are exposed via the
        ``sources``, ``photometry``, ``spectra`` and ``files`` properties.
        """
        self.log = log
        log.debug("instansiating a new 'search' object")
        self.settings = settings
        self.ra = ra
        self.dec = dec
        self.radiusArcsec = radiusArcsec
        self.comments = comments
        self.name = name
        self.internal_name = ""
        self.discInLastDays = discInLastDays
        # PAGINATION STATE FOR THE TNS SEARCH INTERFACE
        self.page = 0
        self.batchSize = 1000

        # CREATE THE TIME-RANGE WINDOW TO SEARCH TNS
        # (end IS TOMORROW SO TODAY'S DISCOVERIES ARE INCLUDED)
        if not discInLastDays:
            self.start = ""
            self.end = ""
        else:
            discInLastDays = int(discInLastDays)
            td = timedelta(days=1)
            end = datetime.now() + td
            self.end = end.strftime("%Y-%m-%d")
            td = timedelta(days=discInLastDays)
            start = datetime.now() - td
            self.start = start.strftime("%Y-%m-%d")

        # DETERMINE IF WE HAVE A TNS OR INTERAL SURVEY NAME
        # TNS NAMES LOOK LIKE '2016asf', OPTIONALLY PREFIXED 'SN '/'AT ';
        # ANYTHING ELSE IS TREATED AS A SURVEY-INTERNAL NAME
        if self.name:
            matchObject = re.match(r'^((SN|AT) ?)?(\d{4}\w{1,6})', self.name)
            if matchObject:
                self.name = matchObject.group(3)
            else:
                self.internal_name = self.name
                self.name = ""

        # DO THE SEARCH OF THE TNS AND COMPILE THE RESULTS INTO SEPARATE RESULT
        # SETS
        self.sourceResultsList, self.photResultsList, self.specResultsList, self.relatedFilesResultsList = self._query_tns()

        # WRAP EACH RAW LIST IN A list_of_dictionaries OBJECT SO RESULTS CAN BE
        # RENDERED AS csv/json/yaml/markdown/table/mysql
        self.sourceResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.sourceResultsList
        )
        self.photResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.photResultsList
        )
        self.specResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.specResultsList
        )
        self.relatedFilesResults = list_of_dictionaries(
            log=log,
            listOfDictionaries=self.relatedFilesResultsList
        )

        return None
@property
def sources(
self):
"""*The results of the search returned as a python list of dictionaries*
**Usage:**
.. code-block:: python
sources = tns.sources
"""
sourceResultsList = []
sourceResultsList[:] = [dict(l) for l in self.sourceResultsList]
return sourceResultsList
@property
def spectra(
self):
"""*The associated source spectral data*
**Usage:**
.. code-block:: python
sourceSpectra = tns.spectra
"""
specResultsList = []
specResultsList[:] = [dict(l) for l in self.specResultsList]
return specResultsList
@property
def files(
self):
"""*The associated source files*
**Usage:**
.. code-block:: python
sourceFiles = tns.files
"""
relatedFilesResultsList = []
relatedFilesResultsList[:] = [dict(l)
for l in self.relatedFilesResultsList]
return relatedFilesResultsList
@property
def photometry(
self):
"""*The associated source photometry*
**Usage:**
.. code-block:: python
sourcePhotometry = tns.photometry
"""
photResultsList = []
photResultsList[:] = [dict(l) for l in self.photResultsList]
return photResultsList
@property
def url(
self):
"""*The generated URL used for searching of the TNS*
**Usage:**
.. code-block:: python
searchURL = tns.url
"""
return self._searchURL
def csv(
self,
dirPath=None):
"""*Render the results in csv format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `csvSources` -- the top-level transient data
- `csvPhot` -- all photometry associated with the transients
- `csvSpec` -- all spectral data associated with the transients
- `csvFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in csv format:
.. code-block:: python
csvSources, csvPhot, csvSpec, csvFiles = tns.csv()
print csvSources
.. code-block:: text
TNSId,TNSName,discoveryName,discSurvey,raSex,decSex,raDeg,decDeg,transRedshift,specType,discMag,discMagFilter,discDate,objectUrl,hostName,hostRedshift,separationArcsec,separationNorthArcsec,separationEastArcsec
2016asf,SN2016asf,ASASSN-16cs,ASAS-SN,06:50:36.73,+31:06:45.36,102.6530,31.1126,0.021,SN Ia,17.1,V-Johnson,2016-03-06 08:09:36,http://wis-tns.weizmann.ac.il/object/2016asf,KUG 0647+311,,0.66,0.65,-0.13
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.csv("~/tns")
.. image:: https://i.imgur.com/BwwqMBg.png
:width: 800px
:alt: csv output
"""
if dirPath:
p = self._file_prefix()
csvSources = self.sourceResults.csv(
filepath=dirPath + "/" + p + "sources.csv")
csvPhot = self.photResults.csv(
filepath=dirPath + "/" + p + "phot.csv")
csvSpec = self.specResults.csv(
filepath=dirPath + "/" + p + "spec.csv")
csvFiles = self.relatedFilesResults.csv(
filepath=dirPath + "/" + p + "relatedFiles.csv")
else:
csvSources = self.sourceResults.csv()
csvPhot = self.photResults.csv()
csvSpec = self.specResults.csv()
csvFiles = self.relatedFilesResults.csv()
return csvSources, csvPhot, csvSpec, csvFiles
def json(
self,
dirPath=None):
"""*Render the results in json format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `jsonSources` -- the top-level transient data
- `jsonPhot` -- all photometry associated with the transients
- `jsonSpec` -- all spectral data associated with the transients
- `jsonFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in json format:
.. code-block:: python
jsonSources, jsonPhot, jsonSpec, jsonFiles = tns.json()
print jsonSources
.. code-block:: text
[
{
"TNSId": "2016asf",
"TNSName": "SN2016asf",
"decDeg": 31.1126,
"decSex": "+31:06:45.36",
"discDate": "2016-03-06 08:09:36",
"discMag": "17.1",
"discMagFilter": "V-Johnson",
"discSurvey": "ASAS-SN",
"discoveryName": "ASASSN-16cs",
"hostName": "KUG 0647+311",
"hostRedshift": null,
"objectUrl": "http://wis-tns.weizmann.ac.il/object/2016asf",
"raDeg": 102.65304166666667,
"raSex": "06:50:36.73",
"separationArcsec": "0.66",
"separationEastArcsec": "-0.13",
"separationNorthArcsec": "0.65",
"specType": "SN Ia",
"transRedshift": "0.021"
}
]
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.json("~/tns")
.. image:: https://i.imgur.com/wAHqARI.png
:width: 800px
:alt: json output
"""
if dirPath:
p = self._file_prefix()
jsonSources = self.sourceResults.json(
filepath=dirPath + "/" + p + "sources.json")
jsonPhot = self.photResults.json(
filepath=dirPath + "/" + p + "phot.json")
jsonSpec = self.specResults.json(
filepath=dirPath + "/" + p + "spec.json")
jsonFiles = self.relatedFilesResults.json(
filepath=dirPath + "/" + p + "relatedFiles.json")
else:
jsonSources = self.sourceResults.json()
jsonPhot = self.photResults.json()
jsonSpec = self.specResults.json()
jsonFiles = self.relatedFilesResults.json()
return jsonSources, jsonPhot, jsonSpec, jsonFiles
def yaml(
self,
dirPath=None):
"""*Render the results in yaml format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `yamlSources` -- the top-level transient data
- `yamlPhot` -- all photometry associated with the transients
- `yamlSpec` -- all spectral data associated with the transients
- `yamlFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in yaml format:
.. code-block:: python
yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml()
print yamlSources
.. code-block:: text
- TNSId: 2016asf
TNSName: SN2016asf
decDeg: 31.1126
decSex: '+31:06:45.36'
discDate: '2016-03-06 08:09:36'
discMag: '17.1'
discMagFilter: V-Johnson
discSurvey: ASAS-SN
discoveryName: ASASSN-16cs
hostName: KUG 0647+311
hostRedshift: null
objectUrl: http://wis-tns.weizmann.ac.il/object/2016asf
raDeg: 102.65304166666667
raSex: '06:50:36.73'
separationArcsec: '0.66'
separationEastArcsec: '-0.13'
separationNorthArcsec: '0.65'
specType: SN Ia
transRedshift: '0.021'
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.yaml("~/tns")
.. image:: https://i.imgur.com/ZpJIC6p.png
:width: 800px
:alt: yaml output
"""
if dirPath:
p = self._file_prefix()
yamlSources = self.sourceResults.yaml(
filepath=dirPath + "/" + p + "sources.yaml")
yamlPhot = self.photResults.yaml(
filepath=dirPath + "/" + p + "phot.yaml")
yamlSpec = self.specResults.yaml(
filepath=dirPath + "/" + p + "spec.yaml")
yamlFiles = self.relatedFilesResults.yaml(
filepath=dirPath + "/" + p + "relatedFiles.yaml")
else:
yamlSources = self.sourceResults.yaml()
yamlPhot = self.photResults.yaml()
yamlSpec = self.specResults.yaml()
yamlFiles = self.relatedFilesResults.yaml()
return yamlSources, yamlPhot, yamlSpec, yamlFiles
def markdown(
self,
dirPath=None):
"""*Render the results in markdown format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `markdownSources` -- the top-level transient data
- `markdownPhot` -- all photometry associated with the transients
- `markdownSpec` -- all spectral data associated with the transients
- `markdownFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in markdown table format:
.. code-block:: python
markdownSources, markdownPhot, markdownSpec, markdownFiles = tns.markdown()
print markdownSources
.. code-block:: text
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
|:---------|:-----------|:---------------|:------------|:-------------|:--------------|:----------|:---------|:---------------|:----------|:---------|:---------------|:---------------------|:----------------------------------------------|:--------------|:--------------|:------------------|:-----------------------|:----------------------|
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.markdown("~/tns")
.. image:: https://i.imgur.com/AYLBQoJ.png
:width: 800px
:alt: markdown output
"""
if dirPath:
p = self._file_prefix()
markdownSources = self.sourceResults.markdown(
filepath=dirPath + "/" + p + "sources.md")
markdownPhot = self.photResults.markdown(
filepath=dirPath + "/" + p + "phot.md")
markdownSpec = self.specResults.markdown(
filepath=dirPath + "/" + p + "spec.md")
markdownFiles = self.relatedFilesResults.markdown(
filepath=dirPath + "/" + p + "relatedFiles.md")
else:
markdownSources = self.sourceResults.markdown()
markdownPhot = self.photResults.markdown()
markdownSpec = self.specResults.markdown()
markdownFiles = self.relatedFilesResults.markdown()
return markdownSources, markdownPhot, markdownSpec, markdownFiles
def table(
self,
dirPath=None):
"""*Render the results as an ascii table*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `tableSources` -- the top-level transient data
- `tablePhot` -- all photometry associated with the transients
- `tableSpec` -- all spectral data associated with the transients
- `tableFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in ascii table format:
.. code-block:: python
tableSources, tablePhot, tableSpec, tableFiles = tns.table()
print tableSources
.. code-block:: text
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.table("~/tns")
.. image:: https://i.imgur.com/m09M0ho.png
:width: 800px
:alt: ascii files
"""
if dirPath:
p = self._file_prefix()
tableSources = self.sourceResults.table(
filepath=dirPath + "/" + p + "sources.ascii")
tablePhot = self.photResults.table(
filepath=dirPath + "/" + p + "phot.ascii")
tableSpec = self.specResults.table(
filepath=dirPath + "/" + p + "spec.ascii")
tableFiles = self.relatedFilesResults.table(
filepath=dirPath + "/" + p + "relatedFiles.ascii")
else:
tableSources = self.sourceResults.table()
tablePhot = self.photResults.table()
tableSpec = self.specResults.table()
tableFiles = self.relatedFilesResults.table()
return tableSources, tablePhot, tableSpec, tableFiles
    def mysql(
            self,
            tableNamePrefix="TNS",
            dirPath=None):
        """*Render the results as MySQL Insert statements*

        **Key Arguments:**
            - ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
            - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

        **Return:**
            - `mysqlSources` -- the top-level transient data
            - `mysqlPhot` -- all photometry associated with the transients
            - `mysqlSpec` -- all spectral data associated with the transients
            - `mysqlFiles` -- all files associated with the matched transients found on the tns

        **Usage:**

        To render the results in mysql insert format:

        .. code-block:: python

            mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")
            print mysqlSources

        .. code-block:: text

            INSERT INTO `TNS_sources` (TNSId,TNSName,dateCreated,decDeg,decSex,discDate,discMag,discMagFilter,discSurvey,discoveryName,hostName,hostRedshift,objectUrl,raDeg,raSex,separationArcsec,separationEastArcsec,separationNorthArcsec,specType,transRedshift) VALUES ("2016asf" ,"SN2016asf" ,"2016-09-20T11:22:13" ,"31.1126" ,"+31:06:45.36" ,"2016-03-06 08:09:36" ,"17.1" ,"V-Johnson" ,"ASAS-SN" ,"ASASSN-16cs" ,"KUG 0647+311" ,null ,"http://wis-tns.weizmann.ac.il/object/2016asf" ,"102.653041667" ,"06:50:36.73" ,"0.66" ,"-0.13" ,"0.65" ,"SN Ia" ,"0.021") ON DUPLICATE KEY UPDATE TNSId="2016asf", TNSName="SN2016asf", dateCreated="2016-09-20T11:22:13", decDeg="31.1126", decSex="+31:06:45.36", discDate="2016-03-06 08:09:36", discMag="17.1", discMagFilter="V-Johnson", discSurvey="ASAS-SN", discoveryName="ASASSN-16cs", hostName="KUG 0647+311", hostRedshift=null, objectUrl="http://wis-tns.weizmann.ac.il/object/2016asf", raDeg="102.653041667", raSex="06:50:36.73", separationArcsec="0.66", separationEastArcsec="-0.13", separationNorthArcsec="0.65", specType="SN Ia", transRedshift="0.021", updated=1, dateLastModified=NOW() ;

        You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be associated with its transient source using the transient's unique `TNSId`.

        .. code-block:: python

            tns.mysql("TNS", "~/tns")

        .. image:: https://i.imgur.com/CozySPW.png
            :width: 800px
            :alt: mysql output
        """
        if dirPath:
            p = self._file_prefix()

            # WHEN WRITING TO FILE, PREPEND A CREATE TABLE STATEMENT SO THE
            # GENERATED SQL SCRIPT CAN BE REPLAYED AGAINST AN EMPTY DATABASE
            createStatement = """
CREATE TABLE `%(tableNamePrefix)s_sources` (
    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
    `TNSId` varchar(20) NOT NULL,
    `TNSName` varchar(20) DEFAULT NULL,
    `dateCreated` datetime DEFAULT NULL,
    `decDeg` double DEFAULT NULL,
    `decSex` varchar(45) DEFAULT NULL,
    `discDate` datetime DEFAULT NULL,
    `discMag` double DEFAULT NULL,
    `discMagFilter` varchar(45) DEFAULT NULL,
    `discSurvey` varchar(100) DEFAULT NULL,
    `discoveryName` varchar(100) DEFAULT NULL,
    `objectUrl` varchar(200) DEFAULT NULL,
    `raDeg` double DEFAULT NULL,
    `raSex` varchar(45) DEFAULT NULL,
    `specType` varchar(100) DEFAULT NULL,
    `transRedshift` double DEFAULT NULL,
    `updated` tinyint(4) DEFAULT '0',
    `dateLastModified` datetime DEFAULT NULL,
    `hostName` VARCHAR(100) NULL DEFAULT NULL,
    `hostRedshift` DOUBLE NULL DEFAULT NULL,
    `survey` VARCHAR(100) NULL DEFAULT NULL,
    PRIMARY KEY (`primaryId`),
    UNIQUE KEY `tnsid` (`TNSId`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()

            mysqlSources = self.sourceResults.mysql(
                tableNamePrefix + "_sources", filepath=dirPath + "/" + p + "sources.sql", createStatement=createStatement)

            createStatement = """
CREATE TABLE `%(tableNamePrefix)s_photometry` (
    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
    `TNSId` varchar(20) NOT NULL,
    `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
    `exptime` double DEFAULT NULL,
    `filter` varchar(100) DEFAULT NULL,
    `limitingMag` tinyint(4) DEFAULT NULL,
    `mag` double DEFAULT NULL,
    `magErr` double DEFAULT NULL,
    `magUnit` varchar(100) DEFAULT NULL,
    `objectName` varchar(100) DEFAULT NULL,
    `obsdate` datetime DEFAULT NULL,
    `reportAddedDate` datetime DEFAULT NULL,
    `suggestedType` varchar(100) DEFAULT NULL,
    `survey` varchar(100) DEFAULT NULL,
    `telescope` varchar(100) DEFAULT NULL,
    `updated` tinyint(4) DEFAULT '0',
    `dateLastModified` datetime DEFAULT NULL,
    `remarks` VARCHAR(800) NULL DEFAULT NULL,
    `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
    PRIMARY KEY (`primaryId`),
    UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
    UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),
    UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()

            mysqlPhot = self.photResults.mysql(
                tableNamePrefix + "_photometry", filepath=dirPath + "/" + p + "phot.sql", createStatement=createStatement)

            createStatement = """
CREATE TABLE `%(tableNamePrefix)s_spectra` (
    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
    `TNSId` varchar(45) NOT NULL,
    `TNSuser` varchar(45) DEFAULT NULL,
    `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
    `exptime` double DEFAULT NULL,
    `obsdate` datetime DEFAULT NULL,
    `reportAddedDate` datetime DEFAULT NULL,
    `specType` varchar(100) DEFAULT NULL,
    `survey` varchar(100) DEFAULT NULL,
    `telescope` varchar(100) DEFAULT NULL,
    `transRedshift` double DEFAULT NULL,
    `updated` tinyint(4) DEFAULT '0',
    `dateLastModified` datetime DEFAULT NULL,
    `remarks` VARCHAR(800) NULL DEFAULT NULL,
    `sourceComment` VARCHAR(800) NULL DEFAULT NULL,
    PRIMARY KEY (`primaryId`),
    UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
    UNIQUE KEY `u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()

            mysqlSpec = self.specResults.mysql(
                tableNamePrefix + "_spectra", filepath=dirPath + "/" + p + "spec.sql", createStatement=createStatement)

            createStatement = """
CREATE TABLE `%(tableNamePrefix)s_files` (
    `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
    `TNSId` varchar(100) NOT NULL,
    `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
    `dateObs` datetime DEFAULT NULL,
    `filename` varchar(200) DEFAULT NULL,
    `spec1phot2` tinyint(4) DEFAULT NULL,
    `url` varchar(800) DEFAULT NULL,
    `updated` tinyint(4) DEFAULT '0',
    `dateLastModified` datetime DEFAULT NULL,
    `comment` VARCHAR(800) NULL DEFAULT NULL,
    PRIMARY KEY (`primaryId`),
    UNIQUE KEY `tnsid_url` (`TNSId`,`url`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()

            mysqlFiles = self.relatedFilesResults.mysql(
                tableNamePrefix + "_files", filepath=dirPath + "/" + p + "relatedFiles.sql", createStatement=createStatement)
        else:
            # NO dirPath - RENDER THE INSERT STATEMENTS IN MEMORY ONLY, NO
            # CREATE TABLE PREAMBLE
            mysqlSources = self.sourceResults.mysql(
                tableNamePrefix + "_sources")
            mysqlPhot = self.photResults.mysql(tableNamePrefix + "_photometry")
            mysqlSpec = self.specResults.mysql(tableNamePrefix + "_spectra")
            mysqlFiles = self.relatedFilesResults.mysql(
                tableNamePrefix + "_files")

        return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles
    def _query_tns(self):
        """
        *determine how to query the TNS, send query and parse the results*

        **Return:**
            - ``results`` -- a list of dictionaries (one dictionary for each result set returned from the TNS)
        """
        self.log.info('starting the ``get`` method')

        sourceTable = []
        photoTable = []
        specTable = []
        relatedFilesTable = []

        # THIS stop IS TO KEEP TRACK OF THE TNS PAGINATION IF MANY RESULT PAGES
        # ARE RETURNED
        stop = False
        sourceCount = 0
        while not stop:
            status_code, content, self._searchURL = self._get_tns_search_results()
            if status_code != 200:
                # NOTE(review): this returns None while __init__ unpacks four
                # values from this method - a non-200 response would raise a
                # TypeError in the caller; verify intended behaviour
                self.log.error(
                    'cound not get the search reuslts from the TNS, HTML error code %(status_code)s ' % locals())
                return None
            if "No results found" in content:
                print "No results found"
                return sourceTable, photoTable, specTable, relatedFilesTable

            # WITH count=True _parse_transient_rows RETURNS ONLY THE NUMBER OF
            # ROWS ON THIS PAGE; A FULL PAGE MEANS THERE MAY BE MORE PAGES
            if self._parse_transient_rows(content, True) < self.batchSize:
                stop = True
            else:
                self.page += 1
                thisPage = self.page
                print "Downloaded %(thisPage)s page(s) from the TNS. %(sourceCount)s transients parsed so far." % locals()
                sourceCount += self.batchSize
                print "\t" + self._searchURL
                # BE POLITE TO THE TNS SERVER BETWEEN PAGE REQUESTS
                timesleep.sleep(1)

            # PARSE ALL ROWS RETURNED
            for transientRow in self._parse_transient_rows(content):
                # TOP LEVEL DISCOVERY CONTENT
                sourceContent = transientRow.group()
                discInfo, TNSId = self._parse_discovery_information(
                    sourceContent)
                sourceTable.append(discInfo)

                # PHOTOMETERY
                phot, relatedFiles = self._parse_photometry_data(
                    sourceContent, TNSId)
                photoTable += phot
                relatedFilesTable += relatedFiles

                # SPECTRA
                spec, relatedFiles = self._parse_spectral_data(
                    sourceContent, TNSId)
                specTable += spec
                relatedFilesTable += relatedFiles

        # SORT BY SEPARATION FROM THE SEARCH COORDINATES
        # (separationArcsec IS ONLY PRESENT FOR CONESEARCHES, HENCE THE
        # BEST-EFFORT try/except)
        try:
            sourceTable = sorted(sourceTable, key=itemgetter(
                'separationArcsec'), reverse=False)
        except:
            pass

        self.log.info('completed the ``get`` method')
        return sourceTable, photoTable, specTable, relatedFilesTable
def _get_tns_search_results(
        self):
    """
    *query the tns and return the response*

    **Return:**
        - ``status_code`` -- HTTP status code of the response (``None`` if the request failed)
        - ``content`` -- the raw body of the response (``None`` if the request failed)
        - ``url`` -- the full URL that was queried (``None`` if the request failed)
    """
    self.log.info('starting the ``_get_tns_search_results`` method')

    try:
        # ALL SEARCH CONSTRAINTS GO IN THE QUERY STRING; THE display[...]
        # FLAGS REQUEST THE EXTRA RESULT COLUMNS PARSED LATER
        response = requests.get(
            url="http://wis-tns.weizmann.ac.il/search",
            params={
                "page": self.page,
                "ra": self.ra,
                "decl": self.dec,
                "radius": self.radiusArcsec,
                "name": self.name,
                "internal_name": self.internal_name,
                "date_start[date]": self.start,
                "date_end[date]": self.end,
                "num_page": self.batchSize,
                "display[redshift]": "1",
                "display[hostname]": "1",
                "display[host_redshift]": "1",
                "display[source_group_name]": "1",
                "display[internal_name]": "1",
                "display[spectra_count]": "1",
                "display[discoverymag]": "1",
                "display[discmagfilter]": "1",
                "display[discoverydate]": "1",
                "display[discoverer]": "1",
                "display[sources]": "1",
                "display[bibcode]": "1",
            },
        )
    except requests.exceptions.RequestException:
        # BUG FIX: previously execution fell through to
        # `response.status_code` with `response` unbound, raising a
        # NameError. Return a null triple instead so the caller's
        # `status_code != 200` check handles the failure gracefully.
        print('HTTP Request failed')
        return None, None, None

    self.log.info('completed the ``_get_tns_search_results`` method')
    return response.status_code, response.content, response.url
def _file_prefix(
        self):
    """*Generate a file prefix based on the type of search for saving files to disk*

    **Return:**
        - ``prefix`` -- the file prefix (timestamped for conesearches and
          recent-discovery searches, name-based otherwise)
    """
    self.log.info('starting the ``_file_prefix`` method')
    if self.ra:
        now = datetime.now()
        prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_conesearch_")
    elif self.name:
        prefix = self.name + "_tns_conesearch_"
    elif self.internal_name:
        prefix = self.internal_name + "_tns_conesearch_"
    elif self.discInLastDays:
        discInLastDays = str(self.discInLastDays)
        now = datetime.now()
        prefix = now.strftime(
            discInLastDays + "d_since_%Y%m%d_tns_conesearch_")
    else:
        # ROBUSTNESS FIX: previously, with no search criterion set,
        # `prefix` was unbound here and the return raised an
        # UnboundLocalError. Fall back to a plain timestamped prefix.
        now = datetime.now()
        prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_search_")
    self.log.info('completed the ``_file_prefix`` method')
    return prefix
def _parse_transient_rows(
        self,
        content,
        count=False):
    """* parse transient rows from the TNS result page content*

    **Key Arguments:**
        - ``content`` -- the content from the TNS results page.
        - ``count`` -- return only the number of rows

    **Return:**
        - ``transientRows`` -- an iterator of regex match objects, one per
          transient row (or an ``int`` row count when ``count`` is True)
    """
    self.log.info('starting the ``_parse_transient_rows`` method')

    # A SINGLE SOURCE BLOCK: EVERYTHING FROM ONE `/object/` ANCHOR UP TO
    # THE NEXT ONE (OR THE END-OF-CONTENT MARKER)
    regexForRow = r"""\n([^\n]*?<a href="/object/.*?)(?=\n[^\n]*?<a href="/object/|<\!\-\- /\.section, /#content \-\->)"""

    # CONSOLIDATION: the original ran a separate re.findall for the count
    # path; a single finditer pass serves both uses identically.
    matchedSources = re.finditer(
        regexForRow,
        content,
        flags=re.S
    )

    if count:
        # ONLY THE NUMBER OF MATCHED ROWS IS REQUIRED
        return sum(1 for m in matchedSources)

    self.log.info('completed the ``_parse_transient_rows`` method')
    return matchedSources
def _parse_discovery_information(
self,
content):
"""* parse discovery information from one row on the TNS results page*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page.
**Return:**
- ``discoveryData`` -- dictionary of results
- ``TNSId`` -- the unique TNS id for the transient
"""
# NOTE(review): indentation was stripped in this dump; the block structure
# below is inferred from statement order -- verify against the upstream
# source before making behavioural edits.
self.log.info('starting the ``_parse_discovery_information`` method')
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
# ONE NAMED-GROUP MATCH PER RESULT-TABLE ROW; GROUP NAMES BECOME DICT KEYS
matches = re.finditer(
r"""<tr class="row-.*?"><td class="cell-id">(?P<tnsId>\d*?)</td><td class="cell-name"><a href="(?P<objectUrl>.*?)">(?P<TNSName>.*?)</a></td><td class="cell-.*?<td class="cell-ra">(?P<raSex>.*?)</td><td class="cell-decl">(?P<decSex>.*?)</td><td class="cell-ot_name">(?P<specType>.*?)</td><td class="cell-redshift">(?P<transRedshift>.*?)</td><td class="cell-hostname">(?P<hostName>.*?)</td><td class="cell-host_redshift">(?P<hostRedshift>.*?)</td><td class="cell-source_group_name">(?P<discSurvey>.*?)</td>.*?<td class="cell-internal_name">(<a.*?>)?(?P<discoveryName>.*?)(</a>)?</td>.*?<td class="cell-discoverymag">(?P<discMag>.*?)</td><td class="cell-disc_filter_name">(?P<discMagFilter>.*?)</td><td class="cell-discoverydate">(?P<discDate>.*?)</td><td class="cell-discoverer">(?P<sender>.*?)</td>.*?</tr>""",
content,
flags=0 # re.S
)
discoveryData = []
for match in matches:
row = match.groupdict()
# STRIP WHITESPACE FROM EVERY VALUE; EMPTY STRINGS BECOME None
for k, v in row.iteritems():
row[k] = v.strip()
if len(v) == 0:
row[k] = None
# NOTE(review): values here are strings or None, so this == 0 integer
# comparison can never be true -- presumably the string "0" was intended.
if row["transRedshift"] == 0:
row["transRedshift"] = None
# TNS NAMES BEGINNING WITH A YEAR DIGIT GET THE "SN" PREFIX
if row["TNSName"][0] in ["1", "2"]:
row["TNSName"] = "SN" + row["TNSName"]
row["objectUrl"] = "http://wis-tns.weizmann.ac.il" + \
row["objectUrl"]
# CONVERT COORDINATES TO DECIMAL DEGREES
row["raDeg"] = converter.ra_sexegesimal_to_decimal(
ra=row["raSex"]
)
row["decDeg"] = converter.dec_sexegesimal_to_decimal(
dec=row["decSex"]
)
# IF THIS IS A COORDINATE SEARCH, ADD SEPARATION FROM
# ORIGINAL QUERY COORDINATES
if self.ra:
# CALCULATE SEPARATION IN ARCSEC
from astrocalc.coords import separations
calculator = separations(
log=self.log,
ra1=self.ra,
dec1=self.dec,
ra2=row["raDeg"],
dec2=row["decDeg"],
)
angularSeparation, north, east = calculator.get()
row["separationArcsec"] = angularSeparation
row["separationNorthArcsec"] = north
row["separationEastArcsec"] = east
if not row["discSurvey"]:
row["survey"] = row["sender"]
del row["sender"]
del row["tnsId"]
# DERIVE THE BARE TNS ID BY STRIPPING THE SN/AT PREFIX FROM THE NAME
row["TNSName"] = row["TNSName"].replace(" ", "")
row["TNSId"] = row["TNSName"].replace(
"SN", "").replace("AT", "")
TNSId = row["TNSId"]
# ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
orow = collections.OrderedDict()
keyOrder = ["TNSId", "TNSName", "discoveryName", "discSurvey", "raSex", "decSex", "raDeg", "decDeg",
"transRedshift", "specType", "discMag", "discMagFilter", "discDate", "objectUrl", "hostName", "hostRedshift", "separationArcsec", "separationNorthArcsec", "separationEastArcsec"]
for k, v in row.iteritems():
if k not in keyOrder:
keyOrder.append(k)
for k in keyOrder:
try:
orow[k] = row[k]
except:
self.log.info(
"`%(k)s` not found in the source data for %(TNSId)s" % locals())
pass
# NOTE(review): the ordered dict ``orow`` assembled above is discarded --
# the unordered ``row`` is appended instead; ``orow`` looks intended here.
discoveryData.append(row)
self.log.info('completed the ``_parse_discovery_information`` method')
# NOTE(review): only the first matched row is returned; any further
# matches in ``content`` are silently dropped.
return discoveryData[0], TNSId
def _parse_photometry_data(
self,
content,
TNSId):
"""*parse photometry data from a row in the tns results content*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page
- ``TNSId`` -- the tns id of the transient
**Return:**
- ``photData`` -- a list of dictionaries of the photometry data
- ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files
"""
# NOTE(review): indentation was stripped in this dump; the block structure
# below is inferred from statement order -- verify against the upstream
# source before making behavioural edits.
self.log.info('starting the ``_parse_photometry_data`` method')
photData = []
relatedFilesTable = []
# AT REPORT BLOCK
ATBlock = re.search(
r"""<tr class=[^\n]*?AT reportings.*?(?=<tr class=[^\n]*?Classification reportings|$)""",
content,
flags=re.S # re.S
)
if ATBlock:
ATBlock = ATBlock.group()
reports = re.finditer(
r"""<tr class="row-[^"]*"><td class="cell-id">.*?</table>""",
ATBlock,
flags=re.S # re.S
)
relatedFiles = self._parse_related_files(ATBlock)
# EACH REPORT CARRIES ONE HEADER PLUS ZERO OR MORE PHOTOMETRY ROWS
for r in reports:
header = re.search(
r"""<tr class="row[^"]*".*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<sender>[^<]*).*?reporter_name">(?P<reporters>[^<]*).*?source_group_name">(?P<surveyGroup>[^<]*).*?ra">(?P<ra>[^<]*).*?decl">(?P<dec>[^<]*).*?discovery_date">(?P<obsDate>[^<]*).*?flux">(?P<mag>[^<]*).*?filter_name">(?P<magFilter>[^<]*).*?related_files">(?P<relatedFiles>[^<]*).*?type_name">(?P<suggestedType>[^<]*).*?hostname">(?P<hostName>[^<]*).*?host_redshift">(?P<hostRedshift>[^<]*).*?internal_name">(?P<objectName>[^<]*).*?groups">(?P<survey>[^<]*).*?remarks">(?P<sourceComment>[^<]*)""",
r.group(),
flags=0 # re.S
)
# NOTE(review): if the header regex fails, this bare except dumps the
# raw row (Python-2 print statement) and then continues with ``header``
# still a non-dict -- worth hardening upstream.
try:
header = header.groupdict()
except:
print r.group()
header["TNSId"] = TNSId
# KEEP ONLY REPORT-LEVEL FIELDS; PER-ROW VALUES COME FROM THE PHOT REGEX
del header["reporters"]
del header["surveyGroup"]
del header["hostName"]
del header["hostRedshift"]
del header["mag"]
del header["magFilter"]
del header["obsDate"]
del header["ra"]
del header["dec"]
if not self.comments:
del header['sourceComment']
else:
# FLATTEN MULTI-LINE COMMENTS AND CAP AT 750 CHARS FOR DB COLUMNS
theseComments = header[
"sourceComment"].split("\n")
header["sourceComment"] = ""
for c in theseComments:
header["sourceComment"] += " " + c.strip()
header["sourceComment"] = header[
"sourceComment"].strip().replace('"', "'")[0:750]
phot = re.finditer(
r"""<tr class="row\-[^"]*".*?obsdate">(?P<obsdate>[^<]*).*?flux">(?P<mag>[^<]*).*?fluxerr">(?P<magErr>[^<]*).*?limflux">(?P<limitingMag>[^<]*).*?unit_name">(?P<magUnit>[^<]*).*?filter_name">(?P<filter>[^<]*).*?tel_inst">(?P<telescope>[^<]*).*?exptime">(?P<exptime>[^<]*).*?observer">(?P<observer>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
r.group(),
flags=0 # re.S
)
filesAppended = False
for p in phot:
p = p.groupdict()
del p["observer"]
# A LIMITING-MAGNITUDE ROW (NON-DETECTION) IS FLAGGED WITH limitingMag=1
if p["limitingMag"] and not p["mag"]:
p["mag"] = p["limitingMag"]
p["limitingMag"] = 1
p["remarks"] = p["remarks"].replace(
"[Last non detection]", "")
else:
p["limitingMag"] = 0
if not self.comments:
del p["remarks"]
p.update(header)
# ATTACH RELATED FILES ONCE PER REPORT (FIRST ROW THAT REFERENCES THEM)
if p["relatedFiles"] and filesAppended == False:
filesAppended = True
for f in relatedFiles:
# ORDER THE DICTIONARY FOR THIS ROW OF
# RESULTS
thisFile = collections.OrderedDict()
thisFile["TNSId"] = TNSId
thisFile["filename"] = f[
"filepath"].split("/")[-1]
thisFile["url"] = f["filepath"]
if self.comments:
thisFile["comment"] = f[
"fileComment"].replace("\n", " ").strip().replace('"', "'")[0:750]
thisFile["dateObs"] = p["obsdate"]
thisFile["spec1phot2"] = 2
relatedFilesTable.append(thisFile)
if not p["survey"] and not p["objectName"]:
p["survey"] = p["sender"]
del p["relatedFiles"]
del p["sender"]
# ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
orow = collections.OrderedDict()
keyOrder = ["TNSId", "survey", "obsdate", "filter", "limitingMag", "mag", "magErr",
"magUnit", "suggestedType", "telescope", "exptime", "reportAddedDate"]
for k, v in p.iteritems():
if k not in keyOrder:
keyOrder.append(k)
for k in keyOrder:
try:
orow[k] = p[k]
except:
self.log.info(
"`%(k)s` not found in the source data for %(TNSId)s" % locals())
pass
photData.append(orow)
self.log.info('completed the ``_parse_photometry_data`` method')
return photData, relatedFilesTable
def _parse_related_files(
        self,
        content):
    """*parse the contents for related files URLs and comments*

    **Key Arguments:**
        - ``content`` -- the content to parse.

    **Return:**
        - ``relatedFiles`` -- a list of dictionaries of transient related files
    """
    self.log.info('starting the ``_parse_related_files`` method')

    # EACH MATCH CAPTURES A FILE URL (`filepath`) AND ITS REMARK (`fileComment`)
    fileMatches = re.finditer(
        r"""<td class="cell-filename">.*?href="(?P<filepath>[^"]*).*?remarks">(?P<fileComment>[^<]*)""",
        content,
        flags=0
    )
    relatedFiles = [m.groupdict() for m in fileMatches]

    self.log.info('completed the ``_parse_related_files`` method')
    return relatedFiles
|
thespacedoctor/transientNamer
|
transientNamer/cl_utils.py
|
main
|
python
|
def main(arguments=None):
"""*Entry point when ``cl_utils.py`` runs as a command-line script: parse
the docopt arguments, run the requested TNS search and render/print the
results.*
"""
# NOTE(review): indentation was stripped in this dump; structure inferred
# from statement order. The code is Python-2 only (``unicode``,
# ``iteritems``, print statements).
# setup the command-line util settings
su = tools(
arguments=arguments,
docString=__doc__,
logLevel="WARNING",
options_first=False,
projectName="transientNamer"
)
arguments, settings, log, dbConn = su.setup()
# tab completion for raw_input
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(tab_complete)
# unpack remaining cl arguments using `exec` to setup the variable names
# automatically
# NOTE(review): this injects each docopt argument as a local variable
# (e.g. ``search``, ``cone``, ``ra``, ``render``) -- the bare names used
# below come from here. exec on raw CLI values is fragile for quoted input.
for arg, val in arguments.iteritems():
if arg[0] == "-":
varname = arg.replace("-", "") + "Flag"
else:
varname = arg.replace("<", "").replace(">", "")
if isinstance(val, str) or isinstance(val, unicode):
exec(varname + " = '%s'" % (val,))
else:
exec(varname + " = %s" % (val,))
if arg == "--dbConn":
dbConn = val
log.debug('%s = %s' % (varname, val,))
## START LOGGING ##
startTime = times.get_now_sql_datetime()
log.info(
'--- STARTING TO RUN THE cl_utils.py AT %s' %
(startTime,))
# BUILD THE SEARCH OBJECT FROM WHICHEVER COMMAND/ARGUMENTS WERE GIVEN
if search or new or cone:
if ra:
tns = transientNamer.search(
log=log,
ra=ra,
dec=dec,
radiusArcsec=arcsecRadius,
comments=withCommentsFlag
)
if name:
tns = transientNamer.search(
log=log,
name=name,
comments=withCommentsFlag
)
if discInLastDays:
tns = transientNamer.search(
log=log,
discInLastDays=discInLastDays,
comments=withCommentsFlag
)
# Recursively create missing directories
if outputFlag and not os.path.exists(outputFlag):
os.makedirs(outputFlag)
# RENDER THE RESULTS IN THE REQUESTED FORMAT (DEFAULT: ASCII TABLE);
# numSources is derived from the rendered text, per-format
if tableNamePrefix:
sources, phot, spec, files = tns.mysql(
tableNamePrefix=tableNamePrefix, dirPath=outputFlag)
numSources = len(sources.split("\n")) - 1
elif not render or render == "table":
sources, phot, spec, files = tns.table(dirPath=outputFlag)
numSources = len(sources.split("\n")) - 4
elif render == "csv":
sources, phot, spec, files = tns.csv(dirPath=outputFlag)
numSources = len(sources.split("\n")) - 1
elif render == "json":
sources, phot, spec, files = tns.json(dirPath=outputFlag)
numSources = len(sources.split("{")) - 1
elif render == "yaml":
sources, phot, spec, files = tns.yaml(dirPath=outputFlag)
numSources = len(sources.split("\n-"))
elif render == "markdown":
sources, phot, spec, files = tns.markdown(dirPath=outputFlag)
numSources = len(sources.split("\n")) - 2
if numSources == 1:
print "%(numSources)s transient found" % locals()
elif numSources > 1:
print "%(numSources)s transients found" % locals()
# ONLY PRINT TO STDOUT WHEN NOT WRITING TO FILES
if not outputFlag:
print "\n# Matched Transients"
print sources
print "\n# Transient Photometry"
print phot
print "\n# Transient Spectra"
print spec
print "\n# Transient Supplementary Files"
print files
print "\n# Original TNS Search URL"
print tns.url
# CALL FUNCTIONS/OBJECTS
# COMMIT AND CLOSE ANY DATABASE CONNECTION PASSED VIA --dbConn
if "dbConn" in locals() and dbConn:
dbConn.commit()
dbConn.close()
## FINISH LOGGING ##
endTime = times.get_now_sql_datetime()
runningTime = times.calculate_time_difference(startTime, endTime)
log.info('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' %
(endTime, runningTime, ))
return
|
*The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
|
train
|
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/cl_utils.py#L49-L162
|
[
"def csv(\n self,\n dirPath=None):\n \"\"\"*Render the results in csv format*\n\n **Key Arguments:**\n - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*\n\n **Return:**\n - `csvSources` -- the top-level transient data\n - `csvPhot` -- all photometry associated with the transients\n - `csvSpec` -- all spectral data associated with the transients\n - `csvFiles` -- all files associated with the matched transients found on the tns\n\n **Usage:**\n\n To render the results in csv format:\n\n .. code-block:: python\n\n csvSources, csvPhot, csvSpec, csvFiles = tns.csv()\n print csvSources\n\n .. code-block:: text\n\n TNSId,TNSName,discoveryName,discSurvey,raSex,decSex,raDeg,decDeg,transRedshift,specType,discMag,discMagFilter,discDate,objectUrl,hostName,hostRedshift,separationArcsec,separationNorthArcsec,separationEastArcsec\n 2016asf,SN2016asf,ASASSN-16cs,ASAS-SN,06:50:36.73,+31:06:45.36,102.6530,31.1126,0.021,SN Ia,17.1,V-Johnson,2016-03-06 08:09:36,http://wis-tns.weizmann.ac.il/object/2016asf,KUG 0647+311,,0.66,0.65,-0.13\n\n You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.\n\n .. code-block:: python\n\n tns.csv(\"~/tns\")\n\n .. 
image:: https://i.imgur.com/BwwqMBg.png\n :width: 800px\n :alt: csv output\n \"\"\"\n\n if dirPath:\n p = self._file_prefix()\n csvSources = self.sourceResults.csv(\n filepath=dirPath + \"/\" + p + \"sources.csv\")\n csvPhot = self.photResults.csv(\n filepath=dirPath + \"/\" + p + \"phot.csv\")\n csvSpec = self.specResults.csv(\n filepath=dirPath + \"/\" + p + \"spec.csv\")\n csvFiles = self.relatedFilesResults.csv(\n filepath=dirPath + \"/\" + p + \"relatedFiles.csv\")\n else:\n csvSources = self.sourceResults.csv()\n csvPhot = self.photResults.csv()\n csvSpec = self.specResults.csv()\n csvFiles = self.relatedFilesResults.csv()\n return csvSources, csvPhot, csvSpec, csvFiles\n",
"def json(\n self,\n dirPath=None):\n \"\"\"*Render the results in json format*\n\n **Key Arguments:**\n - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*\n\n **Return:**\n - `jsonSources` -- the top-level transient data\n - `jsonPhot` -- all photometry associated with the transients\n - `jsonSpec` -- all spectral data associated with the transients\n - `jsonFiles` -- all files associated with the matched transients found on the tns\n\n **Usage:**\n\n To render the results in json format:\n\n .. code-block:: python\n\n jsonSources, jsonPhot, jsonSpec, jsonFiles = tns.json()\n print jsonSources\n\n .. code-block:: text\n\n [\n {\n \"TNSId\": \"2016asf\",\n \"TNSName\": \"SN2016asf\",\n \"decDeg\": 31.1126,\n \"decSex\": \"+31:06:45.36\",\n \"discDate\": \"2016-03-06 08:09:36\",\n \"discMag\": \"17.1\",\n \"discMagFilter\": \"V-Johnson\",\n \"discSurvey\": \"ASAS-SN\",\n \"discoveryName\": \"ASASSN-16cs\",\n \"hostName\": \"KUG 0647+311\",\n \"hostRedshift\": null,\n \"objectUrl\": \"http://wis-tns.weizmann.ac.il/object/2016asf\",\n \"raDeg\": 102.65304166666667,\n \"raSex\": \"06:50:36.73\",\n \"separationArcsec\": \"0.66\",\n \"separationEastArcsec\": \"-0.13\",\n \"separationNorthArcsec\": \"0.65\",\n \"specType\": \"SN Ia\",\n \"transRedshift\": \"0.021\"\n }\n ]\n\n You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.\n\n .. code-block:: python\n\n tns.json(\"~/tns\")\n\n .. 
image:: https://i.imgur.com/wAHqARI.png\n :width: 800px\n :alt: json output\n \"\"\"\n\n if dirPath:\n p = self._file_prefix()\n jsonSources = self.sourceResults.json(\n filepath=dirPath + \"/\" + p + \"sources.json\")\n jsonPhot = self.photResults.json(\n filepath=dirPath + \"/\" + p + \"phot.json\")\n jsonSpec = self.specResults.json(\n filepath=dirPath + \"/\" + p + \"spec.json\")\n jsonFiles = self.relatedFilesResults.json(\n filepath=dirPath + \"/\" + p + \"relatedFiles.json\")\n else:\n jsonSources = self.sourceResults.json()\n jsonPhot = self.photResults.json()\n jsonSpec = self.specResults.json()\n jsonFiles = self.relatedFilesResults.json()\n return jsonSources, jsonPhot, jsonSpec, jsonFiles\n",
"def yaml(\n self,\n dirPath=None):\n \"\"\"*Render the results in yaml format*\n\n **Key Arguments:**\n - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*\n\n **Return:**\n - `yamlSources` -- the top-level transient data\n - `yamlPhot` -- all photometry associated with the transients\n - `yamlSpec` -- all spectral data associated with the transients\n - `yamlFiles` -- all files associated with the matched transients found on the tns\n\n **Usage:**\n\n To render the results in yaml format:\n\n .. code-block:: python\n\n yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml()\n print yamlSources\n\n .. code-block:: text\n\n - TNSId: 2016asf\n TNSName: SN2016asf\n decDeg: 31.1126\n decSex: '+31:06:45.36'\n discDate: '2016-03-06 08:09:36'\n discMag: '17.1'\n discMagFilter: V-Johnson\n discSurvey: ASAS-SN\n discoveryName: ASASSN-16cs\n hostName: KUG 0647+311\n hostRedshift: null\n objectUrl: http://wis-tns.weizmann.ac.il/object/2016asf\n raDeg: 102.65304166666667\n raSex: '06:50:36.73'\n separationArcsec: '0.66'\n separationEastArcsec: '-0.13'\n separationNorthArcsec: '0.65'\n specType: SN Ia\n transRedshift: '0.021'\n\n You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.\n\n .. code-block:: python\n\n tns.yaml(\"~/tns\")\n\n .. 
image:: https://i.imgur.com/ZpJIC6p.png\n :width: 800px\n :alt: yaml output\n \"\"\"\n\n if dirPath:\n p = self._file_prefix()\n yamlSources = self.sourceResults.yaml(\n filepath=dirPath + \"/\" + p + \"sources.yaml\")\n yamlPhot = self.photResults.yaml(\n filepath=dirPath + \"/\" + p + \"phot.yaml\")\n yamlSpec = self.specResults.yaml(\n filepath=dirPath + \"/\" + p + \"spec.yaml\")\n yamlFiles = self.relatedFilesResults.yaml(\n filepath=dirPath + \"/\" + p + \"relatedFiles.yaml\")\n else:\n yamlSources = self.sourceResults.yaml()\n yamlPhot = self.photResults.yaml()\n yamlSpec = self.specResults.yaml()\n yamlFiles = self.relatedFilesResults.yaml()\n return yamlSources, yamlPhot, yamlSpec, yamlFiles\n",
"def markdown(\n self,\n dirPath=None):\n \"\"\"*Render the results in markdown format*\n\n **Key Arguments:**\n - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*\n\n **Return:**\n - `markdownSources` -- the top-level transient data\n - `markdownPhot` -- all photometry associated with the transients\n - `markdownSpec` -- all spectral data associated with the transients\n - `markdownFiles` -- all files associated with the matched transients found on the tns\n\n **Usage:**\n\n To render the results in markdown table format:\n\n .. code-block:: python\n\n markdownSources, markdownPhot, markdownSpec, markdownFiles = tns.markdown()\n print markdownSources\n\n .. code-block:: text\n\n | TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |\n |:---------|:-----------|:---------------|:------------|:-------------|:--------------|:----------|:---------|:---------------|:----------|:---------|:---------------|:---------------------|:----------------------------------------------|:--------------|:--------------|:------------------|:-----------------------|:----------------------|\n | 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |\n\n You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.\n\n .. code-block:: python\n\n tns.markdown(\"~/tns\")\n\n .. 
image:: https://i.imgur.com/AYLBQoJ.png\n :width: 800px\n :alt: markdown output\n \"\"\"\n\n if dirPath:\n p = self._file_prefix()\n markdownSources = self.sourceResults.markdown(\n filepath=dirPath + \"/\" + p + \"sources.md\")\n markdownPhot = self.photResults.markdown(\n filepath=dirPath + \"/\" + p + \"phot.md\")\n markdownSpec = self.specResults.markdown(\n filepath=dirPath + \"/\" + p + \"spec.md\")\n markdownFiles = self.relatedFilesResults.markdown(\n filepath=dirPath + \"/\" + p + \"relatedFiles.md\")\n else:\n markdownSources = self.sourceResults.markdown()\n markdownPhot = self.photResults.markdown()\n markdownSpec = self.specResults.markdown()\n markdownFiles = self.relatedFilesResults.markdown()\n return markdownSources, markdownPhot, markdownSpec, markdownFiles\n",
"def table(\n self,\n dirPath=None):\n \"\"\"*Render the results as an ascii table*\n\n **Key Arguments:**\n - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*\n\n **Return:**\n - `tableSources` -- the top-level transient data\n - `tablePhot` -- all photometry associated with the transients\n - `tableSpec` -- all spectral data associated with the transients\n - `tableFiles` -- all files associated with the matched transients found on the tns\n\n **Usage:**\n\n To render the results in ascii table format:\n\n .. code-block:: python\n\n tableSources, tablePhot, tableSpec, tableFiles = tns.table()\n print tableSources\n\n .. code-block:: text\n\n +----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+\n | TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |\n +----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+\n | 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |\n 
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+\n\n You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.\n\n .. code-block:: python\n\n tns.table(\"~/tns\")\n\n .. image:: https://i.imgur.com/m09M0ho.png\n :width: 800px\n :alt: ascii files\n \"\"\"\n\n if dirPath:\n p = self._file_prefix()\n tableSources = self.sourceResults.table(\n filepath=dirPath + \"/\" + p + \"sources.ascii\")\n tablePhot = self.photResults.table(\n filepath=dirPath + \"/\" + p + \"phot.ascii\")\n tableSpec = self.specResults.table(\n filepath=dirPath + \"/\" + p + \"spec.ascii\")\n tableFiles = self.relatedFilesResults.table(\n filepath=dirPath + \"/\" + p + \"relatedFiles.ascii\")\n else:\n tableSources = self.sourceResults.table()\n tablePhot = self.photResults.table()\n tableSpec = self.specResults.table()\n tableFiles = self.relatedFilesResults.table()\n return tableSources, tablePhot, tableSpec, tableFiles\n",
" def mysql(\n self,\n tableNamePrefix=\"TNS\",\n dirPath=None):\n \"\"\"*Render the results as MySQL Insert statements*\n\n **Key Arguments:**\n - ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.\n - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*\n\n **Return:**\n - `mysqlSources` -- the top-level transient data\n - `mysqlPhot` -- all photometry associated with the transients\n - `mysqlSpec` -- all spectral data associated with the transients\n - `mysqlFiles` -- all files associated with the matched transients found on the tns\n\n **Usage:**\n\n To render the results in mysql insert format:\n\n .. code-block:: python\n\n mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql(\"TNS\")\n print mysqlSources\n\n .. code-block:: text\n\n INSERT INTO `TNS_sources` (TNSId,TNSName,dateCreated,decDeg,decSex,discDate,discMag,discMagFilter,discSurvey,discoveryName,hostName,hostRedshift,objectUrl,raDeg,raSex,separationArcsec,separationEastArcsec,separationNorthArcsec,specType,transRedshift) VALUES (\"2016asf\" ,\"SN2016asf\" ,\"2016-09-20T11:22:13\" ,\"31.1126\" ,\"+31:06:45.36\" ,\"2016-03-06 08:09:36\" ,\"17.1\" ,\"V-Johnson\" ,\"ASAS-SN\" ,\"ASASSN-16cs\" ,\"KUG 0647+311\" ,null ,\"http://wis-tns.weizmann.ac.il/object/2016asf\" ,\"102.653041667\" ,\"06:50:36.73\" ,\"0.66\" ,\"-0.13\" ,\"0.65\" ,\"SN Ia\" ,\"0.021\") ON DUPLICATE KEY UPDATE TNSId=\"2016asf\", TNSName=\"SN2016asf\", dateCreated=\"2016-09-20T11:22:13\", decDeg=\"31.1126\", decSex=\"+31:06:45.36\", discDate=\"2016-03-06 08:09:36\", discMag=\"17.1\", discMagFilter=\"V-Johnson\", discSurvey=\"ASAS-SN\", discoveryName=\"ASASSN-16cs\", hostName=\"KUG 0647+311\", hostRedshift=null, objectUrl=\"http://wis-tns.weizmann.ac.il/object/2016asf\", raDeg=\"102.653041667\", raSex=\"06:50:36.73\", separationArcsec=\"0.66\", separationEastArcsec=\"-0.13\", separationNorthArcsec=\"0.65\", specType=\"SN Ia\", 
transRedshift=\"0.021\", updated=1, dateLastModified=NOW() ;\n\n You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.\n\n .. code-block:: python\n\n tns.mysql(\"TNS\", \"~/tns\")\n\n .. image:: https://i.imgur.com/CozySPW.png\n :width: 800px\n :alt: mysql output\n \"\"\"\n if dirPath:\n p = self._file_prefix()\n\n createStatement = \"\"\"\nCREATE TABLE `%(tableNamePrefix)s_sources` (\n `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',\n `TNSId` varchar(20) NOT NULL,\n `TNSName` varchar(20) DEFAULT NULL,\n `dateCreated` datetime DEFAULT NULL,\n `decDeg` double DEFAULT NULL,\n `decSex` varchar(45) DEFAULT NULL,\n `discDate` datetime DEFAULT NULL,\n `discMag` double DEFAULT NULL,\n `discMagFilter` varchar(45) DEFAULT NULL,\n `discSurvey` varchar(100) DEFAULT NULL,\n `discoveryName` varchar(100) DEFAULT NULL,\n `objectUrl` varchar(200) DEFAULT NULL,\n `raDeg` double DEFAULT NULL,\n `raSex` varchar(45) DEFAULT NULL,\n `specType` varchar(100) DEFAULT NULL,\n `transRedshift` double DEFAULT NULL,\n `updated` tinyint(4) DEFAULT '0',\n `dateLastModified` datetime DEFAULT NULL,\n `hostName` VARCHAR(100) NULL DEFAULT NULL,\n `hostRedshift` DOUBLE NULL DEFAULT NULL, \n `survey` VARCHAR(100) NULL DEFAULT NULL,\n PRIMARY KEY (`primaryId`),\n UNIQUE KEY `tnsid` (`TNSId`)\n) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;\n \"\"\" % locals()\n\n mysqlSources = self.sourceResults.mysql(\n tableNamePrefix + \"_sources\", filepath=dirPath + \"/\" + p + \"sources.sql\", createStatement=createStatement)\n\n createStatement = \"\"\"\nCREATE TABLE `%(tableNamePrefix)s_photometry` (\n `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',\n `TNSId` varchar(20) NOT NULL,\n `dateCreated` datetime 
DEFAULT CURRENT_TIMESTAMP,\n `exptime` double DEFAULT NULL,\n `filter` varchar(100) DEFAULT NULL,\n `limitingMag` tinyint(4) DEFAULT NULL,\n `mag` double DEFAULT NULL,\n `magErr` double DEFAULT NULL,\n `magUnit` varchar(100) DEFAULT NULL,\n `objectName` varchar(100) DEFAULT NULL,\n `obsdate` datetime DEFAULT NULL,\n `reportAddedDate` datetime DEFAULT NULL,\n `suggestedType` varchar(100) DEFAULT NULL,\n `survey` varchar(100) DEFAULT NULL,\n `telescope` varchar(100) DEFAULT NULL,\n `updated` tinyint(4) DEFAULT '0',\n `dateLastModified` datetime DEFAULT NULL,\n `remarks` VARCHAR(800) NULL DEFAULT NULL,\n `sourceComment` VARCHAR(800) NULL DEFAULT NULL,\n PRIMARY KEY (`primaryId`),\n UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),\n UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),\n UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)\n) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;\n \"\"\" % locals()\n\n mysqlPhot = self.photResults.mysql(\n tableNamePrefix + \"_photometry\", filepath=dirPath + \"/\" + p + \"phot.sql\", createStatement=createStatement)\n\n createStatement = \"\"\"\nCREATE TABLE `%(tableNamePrefix)s_spectra` (\n `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',\n `TNSId` varchar(45) NOT NULL,\n `TNSuser` varchar(45) DEFAULT NULL,\n `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,\n `exptime` double DEFAULT NULL,\n `obsdate` datetime DEFAULT NULL,\n `reportAddedDate` datetime DEFAULT NULL,\n `specType` varchar(100) DEFAULT NULL,\n `survey` varchar(100) DEFAULT NULL,\n `telescope` varchar(100) DEFAULT NULL,\n `transRedshift` double DEFAULT NULL,\n `updated` tinyint(4) DEFAULT '0',\n `dateLastModified` datetime DEFAULT NULL,\n `remarks` VARCHAR(800) NULL DEFAULT NULL,\n `sourceComment` VARCHAR(800) NULL DEFAULT NULL,\n PRIMARY KEY (`primaryId`),\n UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),\n UNIQUE KEY 
`u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)\n) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;\n \"\"\" % locals()\n\n mysqlSpec = self.specResults.mysql(\n tableNamePrefix + \"_spectra\", filepath=dirPath + \"/\" + p + \"spec.sql\", createStatement=createStatement)\n\n createStatement = \"\"\"\nCREATE TABLE `%(tableNamePrefix)s_files` (\n `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',\n `TNSId` varchar(100) NOT NULL,\n `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,\n `dateObs` datetime DEFAULT NULL,\n `filename` varchar(200) DEFAULT NULL,\n `spec1phot2` tinyint(4) DEFAULT NULL,\n `url` varchar(800) DEFAULT NULL,\n `updated` tinyint(4) DEFAULT '0',\n `dateLastModified` datetime DEFAULT NULL,\n `comment` VARCHAR(800) NULL DEFAULT NULL,\n PRIMARY KEY (`primaryId`),\n UNIQUE KEY `tnsid_url` (`TNSId`,`url`)\n) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;\n \"\"\" % locals()\n\n mysqlFiles = self.relatedFilesResults.mysql(\n tableNamePrefix + \"_files\", filepath=dirPath + \"/\" + p + \"relatedFiles.sql\", createStatement=createStatement)\n else:\n mysqlSources = self.sourceResults.mysql(\n tableNamePrefix + \"_sources\")\n mysqlPhot = self.photResults.mysql(tableNamePrefix + \"_photometry\")\n mysqlSpec = self.specResults.mysql(tableNamePrefix + \"_spectra\")\n mysqlFiles = self.relatedFilesResults.mysql(\n tableNamePrefix + \"_files\")\n return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles\n"
] |
#!/usr/local/bin/python
# encoding: utf-8
"""
Documentation for transientNamer can be found here: http://transientNamer.readthedocs.org/en/stable
Usage:
transientNamer [-c] cone <ra> <dec> <arcsecRadius> [<render> | mysql <tableNamePrefix>] [-o directory]
transientNamer [-c] search <name> [<render> | mysql <tableNamePrefix>] [-o directory]
transientNamer [-c] new <discInLastDays> [<render> | mysql <tableNamePrefix>] [-o directory]
Commands:
cone perform a conesearch on the TNS
search perform a name search on the TNS
new list newly discovered TNS objects
Arguments:
ra
dec
arcsecRadius
name the name of the object the search for (TNS or survey name)
render output format for results. Options include json, csv, table, markdown, yaml
tableNamePrefix the prefix for the tables to write the mysql insert statements for
dirPath path to the directory to save the output to
Options:
-h, --help show this help message
-v, --version show version
-s, --settings the settings file
-c, --withComments return TNS comments in result sets
-o directory, --output=directory output to files in the directory path
"""
################# GLOBAL IMPORTS ####################
import sys
import os
os.environ['TERM'] = 'vt100'
import readline
import glob
import pickle
from docopt import docopt
from fundamentals import tools, times
import transientNamer
# from ..__init__ import *
def tab_complete(text, state):
    """Readline completion hook: return the ``state``-th filesystem match.

    Globs for paths starting with *text* and appends ``None`` as a sentinel
    so readline stops requesting further completions once the real matches
    are exhausted.
    """
    matches = glob.glob(text + '*')
    matches.append(None)
    return matches[state]
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this excerpt — presumably it is
    # defined elsewhere in the module; confirm before running standalone.
    main()
|
watchforstock/evohome-client
|
evohomeclient2/location.py
|
Location.status
|
python
|
def status(self):
    """Retrieve the location status from the vendor API and return it.

    Fetches the full status payload (including temperature control systems)
    for this location, then pushes the per-gateway / per-system / per-zone
    fragments into the corresponding already-constructed objects by
    updating their ``__dict__`` in place.

    Returns the raw decoded JSON payload (dict).
    Raises ``requests.HTTPError`` on a non-2xx response.
    """
    response = requests.get(
        "https://tccna.honeywell.com/WebAPI/emea/api/v1/"
        "location/%s/status?includeTemperatureControlSystems=True" %
        self.locationId,
        headers=self.client._headers()  # pylint: disable=protected-access
    )
    response.raise_for_status()
    data = response.json()
    # Now feed into other elements
    for gw_data in data['gateways']:
        # Gateways/systems/zones are looked up by id/name, so this assumes
        # the object tree was fully built beforehand (e.g. in __init__).
        gateway = self.gateways[gw_data['gatewayId']]
        for sys in gw_data["temperatureControlSystems"]:
            system = gateway.control_systems[sys['systemId']]
            system.__dict__.update(
                {'systemModeStatus': sys['systemModeStatus'],
                 'activeFaults': sys['activeFaults']})
            # 'dhw' is only present for systems with a hot-water zone.
            if 'dhw' in sys:
                system.hotwater.__dict__.update(sys['dhw'])
            for zone_data in sys["zones"]:
                zone = system.zones[zone_data['name']]
                zone.__dict__.update(zone_data)
    return data
|
Retrieves the location status.
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/location.py#L26-L55
| null |
class Location(object):  # pylint: disable=too-few-public-methods,useless-object-inheritance
    """Provides handling of a location.

    Builds the gateway tree from the API payload and immediately refreshes
    the live status on construction.
    """
    def __init__(self, client, data=None):
        # client: the owning EvohomeClient — used for authenticated requests.
        # data: the raw location payload; its 'locationInfo' fields (including
        # locationId) are copied straight onto this instance.
        self.client = client
        self._gateways = []
        self.gateways = {}
        self.locationId = None  # pylint: disable=invalid-name
        if data is not None:
            self.__dict__.update(data['locationInfo'])
            for gw_data in data['gateways']:
                gateway = Gateway(client, self, gw_data)
                self._gateways.append(gateway)
                self.gateways[gateway.gatewayId] = gateway  # pylint: disable=no-member
            # NOTE(review): status() performs a network round-trip during
            # construction; it is defined elsewhere in this module.
            self.status()
|
watchforstock/evohome-client
|
evohomeclient2/hotwater.py
|
HotWater.set_dhw_on
|
python
|
def set_dhw_on(self, until=None):
    """Sets the DHW on until a given time (datetime), or permanently."""
    if until is None:
        mode, until_time = "PermanentOverride", None
    else:
        mode, until_time = "TemporaryOverride", until.strftime('%Y-%m-%dT%H:%M:%SZ')
    self._set_dhw({"Mode": mode, "State": "On", "UntilTime": until_time})
|
Sets the DHW on until a given time, or permanently.
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/hotwater.py#L34-L45
|
[
"def _set_dhw(self, data):\n headers = dict(self.client._headers()) # pylint: disable=protected-access\n headers['Content-Type'] = 'application/json'\n url = (\n \"https://tccna.honeywell.com/WebAPI/emea/api/v1\"\n \"/domesticHotWater/%s/state\" % self.dhwId\n )\n\n response = requests.put(url, data=json.dumps(data), headers=headers)\n response.raise_for_status()\n"
] |
class HotWater(ZoneBase):
    """Provides handling of the hot water (DHW) zone.

    NOTE(review): ``ZoneBase``, ``requests`` and ``json`` are defined or
    imported elsewhere in this module.
    """
    def __init__(self, client, data):
        # data: raw DHW payload from the API; all of its fields (including
        # dhwId) are copied onto this instance.
        super(HotWater, self).__init__(client)
        self.dhwId = None  # pylint: disable=invalid-name
        self.__dict__.update(data)
        # DHW zones have no display name; they are addressed by dhwId.
        self.name = ""
        self.zoneId = self.dhwId
        self.zone_type = 'domesticHotWater'

    def _set_dhw(self, data):
        # PUT a state-change payload to the DHW endpoint; raises
        # requests.HTTPError on a non-2xx response.
        headers = dict(self.client._headers())  # pylint: disable=protected-access
        headers['Content-Type'] = 'application/json'
        url = (
            "https://tccna.honeywell.com/WebAPI/emea/api/v1"
            "/domesticHotWater/%s/state" % self.dhwId
        )
        response = requests.put(url, data=json.dumps(data), headers=headers)
        response.raise_for_status()

    def set_dhw_off(self, until=None):
        """Sets the DHW off until a given time (datetime), or permanently."""
        if until is None:
            data = {"Mode": "PermanentOverride",
                    "State": "Off",
                    "UntilTime": None}
        else:
            data = {"Mode": "TemporaryOverride",
                    "State": "Off",
                    "UntilTime": until.strftime('%Y-%m-%dT%H:%M:%SZ')}
        self._set_dhw(data)

    def set_dhw_auto(self):
        """Sets the DHW to follow the schedule."""
        data = {"Mode": "FollowSchedule",
                "State": "",
                "UntilTime": None}
        self._set_dhw(data)
|
watchforstock/evohome-client
|
evohomeclient2/hotwater.py
|
HotWater.set_dhw_off
|
python
|
def set_dhw_off(self, until=None):
    """Sets the DHW off until a given time (datetime), or permanently."""
    if until is None:
        mode, until_time = "PermanentOverride", None
    else:
        mode, until_time = "TemporaryOverride", until.strftime('%Y-%m-%dT%H:%M:%SZ')
    self._set_dhw({"Mode": mode, "State": "Off", "UntilTime": until_time})
|
Sets the DHW off until a given time, or permanently.
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/hotwater.py#L47-L58
|
[
"def _set_dhw(self, data):\n headers = dict(self.client._headers()) # pylint: disable=protected-access\n headers['Content-Type'] = 'application/json'\n url = (\n \"https://tccna.honeywell.com/WebAPI/emea/api/v1\"\n \"/domesticHotWater/%s/state\" % self.dhwId\n )\n\n response = requests.put(url, data=json.dumps(data), headers=headers)\n response.raise_for_status()\n"
] |
class HotWater(ZoneBase):
    """Provides handling of the hot water (DHW) zone.

    NOTE(review): ``ZoneBase``, ``requests`` and ``json`` are defined or
    imported elsewhere in this module.
    """
    def __init__(self, client, data):
        # data: raw DHW payload from the API; all of its fields (including
        # dhwId) are copied onto this instance.
        super(HotWater, self).__init__(client)
        self.dhwId = None  # pylint: disable=invalid-name
        self.__dict__.update(data)
        # DHW zones have no display name; they are addressed by dhwId.
        self.name = ""
        self.zoneId = self.dhwId
        self.zone_type = 'domesticHotWater'

    def _set_dhw(self, data):
        # PUT a state-change payload to the DHW endpoint; raises
        # requests.HTTPError on a non-2xx response.
        headers = dict(self.client._headers())  # pylint: disable=protected-access
        headers['Content-Type'] = 'application/json'
        url = (
            "https://tccna.honeywell.com/WebAPI/emea/api/v1"
            "/domesticHotWater/%s/state" % self.dhwId
        )
        response = requests.put(url, data=json.dumps(data), headers=headers)
        response.raise_for_status()

    def set_dhw_on(self, until=None):
        """Sets the DHW on until a given time (datetime), or permanently."""
        if until is None:
            data = {"Mode": "PermanentOverride",
                    "State": "On",
                    "UntilTime": None}
        else:
            data = {"Mode": "TemporaryOverride",
                    "State": "On",
                    "UntilTime": until.strftime('%Y-%m-%dT%H:%M:%SZ')}
        self._set_dhw(data)

    def set_dhw_auto(self):
        """Sets the DHW to follow the schedule."""
        data = {"Mode": "FollowSchedule",
                "State": "",
                "UntilTime": None}
        self._set_dhw(data)
|
watchforstock/evohome-client
|
evohomeclient2/zone.py
|
ZoneBase.schedule
|
python
|
def schedule(self):
    """Gets the schedule for the given zone.

    Fetches the schedule payload, normalises its key casing via plain
    substring replacement, and converts day names to numeric offsets.

    Returns the decoded (and rewritten) JSON payload as a dict.
    Raises ``requests.HTTPError`` on a non-2xx response.
    """
    response = requests.get(
        "https://tccna.honeywell.com/WebAPI/emea/api/v1"
        "/%s/%s/schedule" % (self.zone_type, self.zoneId),
        headers=self.client._headers()  # pylint: disable=no-member,protected-access
    )
    response.raise_for_status()
    # NOTE(review): these are plain, order-sensitive substring replacements
    # on the raw response text — any value (not just key) containing one of
    # the left-hand strings would be rewritten too; confirm against the
    # actual payload before extending this list.
    mapping = [
        ('dailySchedules', 'DailySchedules'),
        ('dayOfWeek', 'DayOfWeek'),
        ('temperature', 'TargetTemperature'),
        ('timeOfDay', 'TimeOfDay'),
        ('switchpoints', 'Switchpoints'),
        ('dhwState', 'DhwState'),
    ]
    response_data = response.text
    for from_val, to_val in mapping:
        response_data = response_data.replace(from_val, to_val)
    data = json.loads(response_data)
    # change the day name string to a number offset (0 = Monday)
    for day_of_week, schedule in enumerate(data['DailySchedules']):
        schedule['DayOfWeek'] = day_of_week
    return data
|
Gets the schedule for the given zone
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/zone.py#L16-L42
| null |
class ZoneBase(object):  # pylint: disable=useless-object-inheritance
    """Provides the base for Zones.

    Subclasses are expected to fill in ``zoneId`` and ``zone_type``
    (e.g. ``'domesticHotWater'``) before the API methods are used.
    """
    def __init__(self, client):
        self.client = client
        self.name = None
        self.zoneId = None  # pylint: disable=invalid-name
        self.zone_type = None

    def set_schedule(self, zone_info):
        """Sets the schedule for this zone.

        zone_info must be a JSON string; it is validated locally, then sent
        verbatim. Returns the decoded JSON response.
        Raises ``ValueError`` for invalid JSON and ``requests.HTTPError``
        on a non-2xx response.
        """
        # must only POST json, otherwise server API handler raises exceptions
        try:
            json.loads(zone_info)
        except ValueError as error:
            raise ValueError("zone_info must be valid JSON: ", error)
        headers = dict(self.client._headers())  # pylint: disable=protected-access
        headers['Content-Type'] = 'application/json'
        response = requests.put(
            "https://tccna.honeywell.com/WebAPI/emea/api/v1"
            "/%s/%s/schedule" % (self.zone_type, self.zoneId),
            data=zone_info, headers=headers
        )
        response.raise_for_status()
        return response.json()
|
watchforstock/evohome-client
|
evohomeclient2/zone.py
|
ZoneBase.set_schedule
|
python
|
def set_schedule(self, zone_info):
    """Sets the schedule for this zone.

    zone_info must be a JSON string; it is validated locally before being
    sent verbatim to the API. Returns the decoded JSON response.
    Raises ``ValueError`` for invalid JSON and ``requests.HTTPError`` on a
    non-2xx response.
    """
    # must only POST json, otherwise server API handler raises exceptions
    try:
        json.loads(zone_info)
    except ValueError as error:
        # Fold the decode error into a single message string. The original
        # `raise ValueError("...: ", error)` passed a trailing comma-tuple,
        # producing a two-argument exception with a confusing repr.
        raise ValueError("zone_info must be valid JSON: " + str(error))
    headers = dict(self.client._headers())  # pylint: disable=protected-access
    headers['Content-Type'] = 'application/json'
    response = requests.put(
        "https://tccna.honeywell.com/WebAPI/emea/api/v1"
        "/%s/%s/schedule" % (self.zone_type, self.zoneId),
        data=zone_info, headers=headers
    )
    response.raise_for_status()
    return response.json()
|
Sets the schedule for this zone
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/zone.py#L44-L63
| null |
class ZoneBase(object):  # pylint: disable=useless-object-inheritance
    """Provides the base for Zones.

    Subclasses are expected to fill in ``zoneId`` and ``zone_type``
    (e.g. ``'domesticHotWater'``) before the API methods are used.
    """
    def __init__(self, client):
        self.client = client
        self.name = None
        self.zoneId = None  # pylint: disable=invalid-name
        self.zone_type = None

    def schedule(self):
        """Gets the schedule for the given zone.

        Fetches the schedule payload, normalises its key casing via plain
        substring replacement, and converts day names to numeric offsets.
        Returns the rewritten payload as a dict.
        """
        response = requests.get(
            "https://tccna.honeywell.com/WebAPI/emea/api/v1"
            "/%s/%s/schedule" % (self.zone_type, self.zoneId),
            headers=self.client._headers()  # pylint: disable=no-member,protected-access
        )
        response.raise_for_status()
        # NOTE(review): order-sensitive substring replacements on the raw
        # response text — values containing these strings would be rewritten
        # too; confirm against the actual payload before extending the list.
        mapping = [
            ('dailySchedules', 'DailySchedules'),
            ('dayOfWeek', 'DayOfWeek'),
            ('temperature', 'TargetTemperature'),
            ('timeOfDay', 'TimeOfDay'),
            ('switchpoints', 'Switchpoints'),
            ('dhwState', 'DhwState'),
        ]
        response_data = response.text
        for from_val, to_val in mapping:
            response_data = response_data.replace(from_val, to_val)
        data = json.loads(response_data)
        # change the day name string to a number offset (0 = Monday)
        for day_of_week, schedule in enumerate(data['DailySchedules']):
            schedule['DayOfWeek'] = day_of_week
        return data
|
watchforstock/evohome-client
|
evohomeclient/__init__.py
|
EvohomeClient.temperatures
|
python
|
def temperatures(self, force_refresh=False):
    """Retrieve the current details for each zone. Returns a generator.

    Yields one dict per device with its model type, id, name, current
    temperature, setpoint, status and mode. Devices without a
    'heatSetpoint' entry (e.g. hot water) report a setpoint of 0.
    """
    self._populate_full_data(force_refresh)
    for dev in self.full_data['devices']:
        thermostat = dev['thermostat']
        changeable = thermostat['changeableValues']
        if 'heatSetpoint' in changeable:
            setpoint_info = changeable['heatSetpoint']
            set_point = float(setpoint_info['value'])
            status = setpoint_info['status']
        else:
            set_point = 0
            status = changeable['status']
        yield {'thermostat': dev['thermostatModelType'],
               'id': dev['deviceID'],
               'name': dev['name'],
               'temp': float(thermostat['indoorTemperature']),
               'setpoint': set_point,
               'status': status,
               'mode': changeable['mode']}
|
Retrieve the current details for each zone. Returns a generator.
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient/__init__.py#L115-L133
|
[
"def _populate_full_data(self, force_refresh=False):\n if self.full_data is None or force_refresh:\n self._populate_user_info()\n\n user_id = self.user_data['userInfo']['userID']\n session_id = self.user_data['sessionId']\n\n url = (self.hostname + \"/WebAPI/api/locations\"\n \"?userId=%s&allData=True\" % user_id)\n self.headers['sessionId'] = session_id\n\n response = self._do_request('get', url, json.dumps(self.postdata))\n\n self.full_data = self._convert(response.content)[0]\n\n self.location_id = self.full_data['locationID']\n\n self.devices = {}\n self.named_devices = {}\n\n for device in self.full_data['devices']:\n self.devices[device['deviceID']] = device\n self.named_devices[device['name']] = device\n"
] |
class EvohomeClient(object):  # pylint: disable=useless-object-inheritance
    """Provides a client to access the Honeywell Evohome system"""
    # pylint: disable=too-many-instance-attributes,too-many-arguments

    def __init__(self, username, password, debug=False, user_data=None,
                 hostname="https://tccna.honeywell.com"):
        """Constructor. Takes the username and password for the service.

        If user_data is given then this will be used to try and reduce
        the number of calls to the authentication service which is known
        to be rate limited.
        """
        if debug is True:
            # Turn on verbose logging for this module, requests/urllib3 and
            # the low-level HTTP connection.
            _LOGGER.setLevel(logging.DEBUG)
            _LOGGER.debug("Debug mode is explicitly enabled.")
            requests_logger = logging.getLogger("requests.packages.urllib3")
            requests_logger.setLevel(logging.DEBUG)
            requests_logger.propagate = True
            http_client.HTTPConnection.debuglevel = 1
        else:
            _LOGGER.debug(
                "Debug mode is not explicitly enabled "
                "(but may be enabled elsewhere)."
            )
        self.username = username
        self.password = password
        self.user_data = user_data
        self.hostname = hostname
        # Caches populated lazily by _populate_full_data/_populate_user_info.
        self.full_data = None
        self.gateway_data = None
        self.reader = codecs.getdecoder("utf-8")
        self.location_id = ""
        self.devices = {}
        self.named_devices = {}
        self.postdata = {}
        self.headers = {}

    def _convert(self, content):
        # Decode the raw HTTP body (bytes) as UTF-8 and parse it as JSON.
        return json.loads(self.reader(content)[0])

    def _populate_full_data(self, force_refresh=False):
        # Fetch (and cache) the complete location/device payload; refresh
        # only when not yet loaded or explicitly forced. Also rebuilds the
        # by-id and by-name device indexes.
        if self.full_data is None or force_refresh:
            self._populate_user_info()
            user_id = self.user_data['userInfo']['userID']
            session_id = self.user_data['sessionId']
            url = (self.hostname + "/WebAPI/api/locations"
                   "?userId=%s&allData=True" % user_id)
            self.headers['sessionId'] = session_id
            response = self._do_request('get', url, json.dumps(self.postdata))
            # NOTE(review): only the first location in the response is used.
            self.full_data = self._convert(response.content)[0]
            self.location_id = self.full_data['locationID']
            self.devices = {}
            self.named_devices = {}
            for device in self.full_data['devices']:
                self.devices[device['deviceID']] = device
                self.named_devices[device['name']] = device

    def _populate_user_info(self):
        # Authenticate once and cache the session info; subsequent calls
        # are no-ops until user_data is cleared.
        if self.user_data is None:
            url = self.hostname + "/WebAPI/api/Session"
            self.postdata = {'Username': self.username,
                             'Password': self.password,
                             'ApplicationId': '91db1612-73fd-4500-91b2-e63b069b185c'}
            self.headers = {'content-type': 'application/json'}
            response = self._do_request(
                'post', url, data=json.dumps(self.postdata), retry=False)
            self.user_data = self._convert(response.content)
        return self.user_data

    def get_modes(self, zone):
        """Returns the set of modes the device can be assigned."""
        self._populate_full_data()
        device = self._get_device(zone)
        return device['thermostat']['allowedModes']

    def _get_device(self, zone):
        # Look a device up by name (string) or by device id (anything else).
        if isinstance(zone, str) or (IS_PY2 and isinstance(zone, basestring)):  # noqa: F821; pylint: disable=undefined-variable
            device = self.named_devices[zone]
        else:
            device = self.devices[zone]
        return device

    def _get_task_status(self, task_id):
        # Poll the server-side state of an asynchronous command task.
        self._populate_full_data()
        url = (self.hostname + "/WebAPI/api/commTasks"
               "?commTaskId=%s" % task_id)
        response = self._do_request('get', url)
        return self._convert(response.content)['state']

    def _get_task_id(self, response):
        # The API may return either a list of tasks or a single task object.
        ret = self._convert(response.content)
        if isinstance(ret, list):
            task_id = ret[0]['id']
        else:
            task_id = ret['id']
        return task_id

    def _do_request(self, method, url, data=None, retry=True):
        # Issue an HTTP request, transparently re-authenticating once if the
        # cached session has expired (401 with an "Unauthorized" code).
        if method == 'get':
            func = requests.get
        elif method == 'put':
            func = requests.put
        elif method == 'post':
            func = requests.post
        response = func(url, data=data, headers=self.headers)
        # catch 401/unauthorized since we may retry
        if response.status_code == requests.codes.unauthorized and retry:  # pylint: disable=no-member
            # Attempt to refresh sessionId if it has expired
            if 'code' in response.text:  # don't use response.json() here!
                if response.json()[0]['code'] == "Unauthorized":
                    _LOGGER.debug("Session expired, re-authenticating...")
                    # Get a new sessionId
                    self.user_data = None
                    self._populate_user_info()
                    # Set headers with new sessionId
                    session_id = self.user_data['sessionId']
                    self.headers['sessionId'] = session_id
                    _LOGGER.debug("sessionId = %s", session_id)
                    response = func(url, data=data, headers=self.headers)
        # display error message if the vendor provided one
        if response.status_code != requests.codes.ok:  # pylint: disable=no-member
            if 'code' in response.text:  # don't use response.json()!
                message = ("HTTP Status = " + str(response.status_code) +
                           ", Response = " + response.text)
                raise requests.HTTPError(message)
        response.raise_for_status()
        return response

    def _set_status(self, status, until=None):
        # Apply a system-wide quick action (optionally time-limited) and
        # block, polling once per second, until the task reports success.
        self._populate_full_data()
        url = (self.hostname + "/WebAPI/api/evoTouchSystems"
               "?locationId=%s" % self.location_id)
        if until is None:
            data = {"QuickAction": status, "QuickActionNextTime": None}
        else:
            data = {
                "QuickAction": status,
                "QuickActionNextTime": "%sT00:00:00Z" % until.strftime('%Y-%m-%d')
            }
        response = self._do_request('put', url, json.dumps(data))
        task_id = self._get_task_id(response)
        while self._get_task_status(task_id) != 'Succeeded':
            time.sleep(1)

    def set_status_normal(self):
        """Sets the system to normal operation."""
        self._set_status('Auto')

    def set_status_custom(self, until=None):
        """Sets the system to the custom programme."""
        self._set_status('Custom', until)

    def set_status_eco(self, until=None):
        """Sets the system to the eco mode."""
        self._set_status('AutoWithEco', until)

    def set_status_away(self, until=None):
        """Sets the system to the away mode."""
        self._set_status('Away', until)

    def set_status_dayoff(self, until=None):
        """Sets the system to the day off mode."""
        self._set_status('DayOff', until)

    def set_status_heatingoff(self, until=None):
        """Sets the system to the heating off mode."""
        self._set_status('HeatingOff', until)

    def _get_device_id(self, zone):
        # Resolve a zone name or id to its deviceID.
        device = self._get_device(zone)
        return device['deviceID']

    def _set_heat_setpoint(self, zone, data):
        # PUT a heat-setpoint change for one zone and wait for completion.
        self._populate_full_data()
        device_id = self._get_device_id(zone)
        url = (self.hostname + "/WebAPI/api/devices"
               "/%s/thermostat/changeableValues/heatSetpoint" % device_id)
        response = self._do_request('put', url, json.dumps(data))
        task_id = self._get_task_id(response)
        while self._get_task_status(task_id) != 'Succeeded':
            time.sleep(1)

    def set_temperature(self, zone, temperature, until=None):
        """Sets the temperature of the given zone."""
        if until is None:
            data = {"Value": temperature, "Status": "Hold", "NextTime": None}
        else:
            data = {"Value": temperature,
                    "Status": "Temporary",
                    "NextTime": until.strftime('%Y-%m-%dT%H:%M:%SZ')}
        self._set_heat_setpoint(zone, data)

    def cancel_temp_override(self, zone):
        """Removes an existing temperature override."""
        data = {"Value": None, "Status": "Scheduled", "NextTime": None}
        self._set_heat_setpoint(zone, data)

    def _get_dhw_zone(self):
        # Return the deviceID of the DHW device, or None if there isn't one.
        for device in self.full_data['devices']:
            if device['thermostatModelType'] == 'DOMESTIC_HOT_WATER':
                return device['deviceID']
        return None

    def _set_dhw(self, status="Scheduled", mode=None, next_time=None):
        """Set DHW to On, Off or Auto, either indefinitely, or until a
        specified time.
        """
        data = {"Status": status,
                "Mode": mode,
                "NextTime": next_time,
                "SpecialModes": None,
                "HeatSetpoint": None,
                "CoolSetpoint": None}
        self._populate_full_data()
        dhw_zone = self._get_dhw_zone()
        if dhw_zone is None:
            raise Exception('No DHW zone reported from API')
        url = (self.hostname + "/WebAPI/api/devices"
               "/%s/thermostat/changeableValues" % dhw_zone)
        response = self._do_request('put', url, json.dumps(data))
        task_id = self._get_task_id(response)
        while self._get_task_status(task_id) != 'Succeeded':
            time.sleep(1)

    def set_dhw_on(self, until=None):
        """Set DHW to on, either indefinitely, or until a specified time.

        When On, the DHW controller will work to keep its target temperature
        at/above its target temperature. After the specified time, it will
        revert to its scheduled behaviour.
        """
        time_until = None if until is None else until.strftime(
            '%Y-%m-%dT%H:%M:%SZ')
        self._set_dhw(status="Hold", mode="DHWOn", next_time=time_until)

    def set_dhw_off(self, until=None):
        """Set DHW to off, either indefinitely, or until a specified time.

        When Off, the DHW controller will ignore its target temperature. After
        the specified time, it will revert to its scheduled behaviour.
        """
        time_until = None if until is None else until.strftime(
            '%Y-%m-%dT%H:%M:%SZ')
        self._set_dhw(status="Hold", mode="DHWOff", next_time=time_until)

    def set_dhw_auto(self):
        """Set DHW to On or Off, according to its schedule."""
        self._set_dhw(status="Scheduled")
|
watchforstock/evohome-client
|
evohomeclient/__init__.py
|
EvohomeClient.get_modes
|
python
|
def get_modes(self, zone):
    """Returns the set of modes the device can be assigned."""
    self._populate_full_data()
    return self._get_device(zone)['thermostat']['allowedModes']
|
Returns the set of modes the device can be assigned.
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient/__init__.py#L135-L139
|
[
"def _populate_full_data(self, force_refresh=False):\n if self.full_data is None or force_refresh:\n self._populate_user_info()\n\n user_id = self.user_data['userInfo']['userID']\n session_id = self.user_data['sessionId']\n\n url = (self.hostname + \"/WebAPI/api/locations\"\n \"?userId=%s&allData=True\" % user_id)\n self.headers['sessionId'] = session_id\n\n response = self._do_request('get', url, json.dumps(self.postdata))\n\n self.full_data = self._convert(response.content)[0]\n\n self.location_id = self.full_data['locationID']\n\n self.devices = {}\n self.named_devices = {}\n\n for device in self.full_data['devices']:\n self.devices[device['deviceID']] = device\n self.named_devices[device['name']] = device\n",
"def _get_device(self, zone):\n if isinstance(zone, str) or (IS_PY2 and isinstance(zone, basestring)): # noqa: F821; pylint: disable=undefined-variable\n device = self.named_devices[zone]\n else:\n device = self.devices[zone]\n return device\n"
] |
class EvohomeClient(object):  # pylint: disable=useless-object-inheritance
    """Provides a client to access the Honeywell Evohome system"""
    # pylint: disable=too-many-instance-attributes,too-many-arguments

    def __init__(self, username, password, debug=False, user_data=None,
                 hostname="https://tccna.honeywell.com"):
        """Constructor. Takes the username and password for the service.

        If user_data is given then this will be used to try and reduce
        the number of calls to the authentication service which is known
        to be rate limited.
        """
        if debug is True:
            # Turn on verbose logging for this module, requests/urllib3 and
            # the low-level HTTP connection.
            _LOGGER.setLevel(logging.DEBUG)
            _LOGGER.debug("Debug mode is explicitly enabled.")
            requests_logger = logging.getLogger("requests.packages.urllib3")
            requests_logger.setLevel(logging.DEBUG)
            requests_logger.propagate = True
            http_client.HTTPConnection.debuglevel = 1
        else:
            _LOGGER.debug(
                "Debug mode is not explicitly enabled "
                "(but may be enabled elsewhere)."
            )
        self.username = username
        self.password = password
        self.user_data = user_data
        self.hostname = hostname
        # Caches populated lazily by _populate_full_data/_populate_user_info.
        self.full_data = None
        self.gateway_data = None
        self.reader = codecs.getdecoder("utf-8")
        self.location_id = ""
        self.devices = {}
        self.named_devices = {}
        self.postdata = {}
        self.headers = {}

    def _convert(self, content):
        # Decode the raw HTTP body (bytes) as UTF-8 and parse it as JSON.
        return json.loads(self.reader(content)[0])

    def _populate_full_data(self, force_refresh=False):
        # Fetch (and cache) the complete location/device payload; refresh
        # only when not yet loaded or explicitly forced. Also rebuilds the
        # by-id and by-name device indexes.
        if self.full_data is None or force_refresh:
            self._populate_user_info()
            user_id = self.user_data['userInfo']['userID']
            session_id = self.user_data['sessionId']
            url = (self.hostname + "/WebAPI/api/locations"
                   "?userId=%s&allData=True" % user_id)
            self.headers['sessionId'] = session_id
            response = self._do_request('get', url, json.dumps(self.postdata))
            # NOTE(review): only the first location in the response is used.
            self.full_data = self._convert(response.content)[0]
            self.location_id = self.full_data['locationID']
            self.devices = {}
            self.named_devices = {}
            for device in self.full_data['devices']:
                self.devices[device['deviceID']] = device
                self.named_devices[device['name']] = device

    def _populate_user_info(self):
        # Authenticate once and cache the session info; subsequent calls
        # are no-ops until user_data is cleared.
        if self.user_data is None:
            url = self.hostname + "/WebAPI/api/Session"
            self.postdata = {'Username': self.username,
                             'Password': self.password,
                             'ApplicationId': '91db1612-73fd-4500-91b2-e63b069b185c'}
            self.headers = {'content-type': 'application/json'}
            response = self._do_request(
                'post', url, data=json.dumps(self.postdata), retry=False)
            self.user_data = self._convert(response.content)
        return self.user_data

    def temperatures(self, force_refresh=False):
        """Retrieve the current details for each zone. Returns a generator."""
        self._populate_full_data(force_refresh)
        for device in self.full_data['devices']:
            set_point = 0
            status = ""
            # Devices without a 'heatSetpoint' (e.g. hot water) report 0.
            if 'heatSetpoint' in device['thermostat']['changeableValues']:
                set_point = float(
                    device['thermostat']['changeableValues']["heatSetpoint"]["value"])
                status = device['thermostat']['changeableValues']["heatSetpoint"]["status"]
            else:
                status = device['thermostat']['changeableValues']['status']
            yield {'thermostat': device['thermostatModelType'],
                   'id': device['deviceID'],
                   'name': device['name'],
                   'temp': float(device['thermostat']['indoorTemperature']),
                   'setpoint': set_point,
                   'status': status,
                   'mode': device['thermostat']['changeableValues']['mode']}

    def _get_device(self, zone):
        # Look a device up by name (string) or by device id (anything else).
        if isinstance(zone, str) or (IS_PY2 and isinstance(zone, basestring)):  # noqa: F821; pylint: disable=undefined-variable
            device = self.named_devices[zone]
        else:
            device = self.devices[zone]
        return device

    def _get_task_status(self, task_id):
        # Poll the server-side state of an asynchronous command task.
        self._populate_full_data()
        url = (self.hostname + "/WebAPI/api/commTasks"
               "?commTaskId=%s" % task_id)
        response = self._do_request('get', url)
        return self._convert(response.content)['state']

    def _get_task_id(self, response):
        # The API may return either a list of tasks or a single task object.
        ret = self._convert(response.content)
        if isinstance(ret, list):
            task_id = ret[0]['id']
        else:
            task_id = ret['id']
        return task_id

    def _do_request(self, method, url, data=None, retry=True):
        # Issue an HTTP request, transparently re-authenticating once if the
        # cached session has expired (401 with an "Unauthorized" code).
        if method == 'get':
            func = requests.get
        elif method == 'put':
            func = requests.put
        elif method == 'post':
            func = requests.post
        response = func(url, data=data, headers=self.headers)
        # catch 401/unauthorized since we may retry
        if response.status_code == requests.codes.unauthorized and retry:  # pylint: disable=no-member
            # Attempt to refresh sessionId if it has expired
            if 'code' in response.text:  # don't use response.json() here!
                if response.json()[0]['code'] == "Unauthorized":
                    _LOGGER.debug("Session expired, re-authenticating...")
                    # Get a new sessionId
                    self.user_data = None
                    self._populate_user_info()
                    # Set headers with new sessionId
                    session_id = self.user_data['sessionId']
                    self.headers['sessionId'] = session_id
                    _LOGGER.debug("sessionId = %s", session_id)
                    response = func(url, data=data, headers=self.headers)
        # display error message if the vendor provided one
        if response.status_code != requests.codes.ok:  # pylint: disable=no-member
            if 'code' in response.text:  # don't use response.json()!
                message = ("HTTP Status = " + str(response.status_code) +
                           ", Response = " + response.text)
                raise requests.HTTPError(message)
        response.raise_for_status()
        return response

    def _set_status(self, status, until=None):
        # Apply a system-wide quick action (optionally time-limited) and
        # block, polling once per second, until the task reports success.
        self._populate_full_data()
        url = (self.hostname + "/WebAPI/api/evoTouchSystems"
               "?locationId=%s" % self.location_id)
        if until is None:
            data = {"QuickAction": status, "QuickActionNextTime": None}
        else:
            data = {
                "QuickAction": status,
                "QuickActionNextTime": "%sT00:00:00Z" % until.strftime('%Y-%m-%d')
            }
        response = self._do_request('put', url, json.dumps(data))
        task_id = self._get_task_id(response)
        while self._get_task_status(task_id) != 'Succeeded':
            time.sleep(1)

    def set_status_normal(self):
        """Sets the system to normal operation."""
        self._set_status('Auto')

    def set_status_custom(self, until=None):
        """Sets the system to the custom programme."""
        self._set_status('Custom', until)

    def set_status_eco(self, until=None):
        """Sets the system to the eco mode."""
        self._set_status('AutoWithEco', until)

    def set_status_away(self, until=None):
        """Sets the system to the away mode."""
        self._set_status('Away', until)

    def set_status_dayoff(self, until=None):
        """Sets the system to the day off mode."""
        self._set_status('DayOff', until)

    def set_status_heatingoff(self, until=None):
        """Sets the system to the heating off mode."""
        self._set_status('HeatingOff', until)

    def _get_device_id(self, zone):
        # Resolve a zone name or id to its deviceID.
        device = self._get_device(zone)
        return device['deviceID']

    def _set_heat_setpoint(self, zone, data):
        # PUT a heat-setpoint change for one zone and wait for completion.
        self._populate_full_data()
        device_id = self._get_device_id(zone)
        url = (self.hostname + "/WebAPI/api/devices"
               "/%s/thermostat/changeableValues/heatSetpoint" % device_id)
        response = self._do_request('put', url, json.dumps(data))
        task_id = self._get_task_id(response)
        while self._get_task_status(task_id) != 'Succeeded':
            time.sleep(1)

    def set_temperature(self, zone, temperature, until=None):
        """Sets the temperature of the given zone."""
        if until is None:
            data = {"Value": temperature, "Status": "Hold", "NextTime": None}
        else:
            data = {"Value": temperature,
                    "Status": "Temporary",
                    "NextTime": until.strftime('%Y-%m-%dT%H:%M:%SZ')}
        self._set_heat_setpoint(zone, data)

    def cancel_temp_override(self, zone):
        """Removes an existing temperature override."""
        data = {"Value": None, "Status": "Scheduled", "NextTime": None}
        self._set_heat_setpoint(zone, data)

    def _get_dhw_zone(self):
        # Return the deviceID of the DHW device, or None if there isn't one.
        for device in self.full_data['devices']:
            if device['thermostatModelType'] == 'DOMESTIC_HOT_WATER':
                return device['deviceID']
        return None

    def _set_dhw(self, status="Scheduled", mode=None, next_time=None):
        """Set DHW to On, Off or Auto, either indefinitely, or until a
        specified time.
        """
        data = {"Status": status,
                "Mode": mode,
                "NextTime": next_time,
                "SpecialModes": None,
                "HeatSetpoint": None,
                "CoolSetpoint": None}
        self._populate_full_data()
        dhw_zone = self._get_dhw_zone()
        if dhw_zone is None:
            raise Exception('No DHW zone reported from API')
        url = (self.hostname + "/WebAPI/api/devices"
               "/%s/thermostat/changeableValues" % dhw_zone)
        response = self._do_request('put', url, json.dumps(data))
        task_id = self._get_task_id(response)
        while self._get_task_status(task_id) != 'Succeeded':
            time.sleep(1)

    def set_dhw_on(self, until=None):
        """Set DHW to on, either indefinitely, or until a specified time.

        When On, the DHW controller will work to keep its target temperature
        at/above its target temperature. After the specified time, it will
        revert to its scheduled behaviour.
        """
        time_until = None if until is None else until.strftime(
            '%Y-%m-%dT%H:%M:%SZ')
        self._set_dhw(status="Hold", mode="DHWOn", next_time=time_until)

    def set_dhw_off(self, until=None):
        """Set DHW to off, either indefinitely, or until a specified time.

        When Off, the DHW controller will ignore its target temperature. After
        the specified time, it will revert to its scheduled behaviour.
        """
        time_until = None if until is None else until.strftime(
            '%Y-%m-%dT%H:%M:%SZ')
        self._set_dhw(status="Hold", mode="DHWOff", next_time=time_until)

    def set_dhw_auto(self):
        """Set DHW to On or Off, according to its schedule."""
        self._set_dhw(status="Scheduled")
|
watchforstock/evohome-client
|
evohomeclient/__init__.py
|
EvohomeClient.set_temperature
|
python
|
def set_temperature(self, zone, temperature, until=None):
    """Sets the temperature of the given zone.

    With no *until*, the setpoint is held permanently; otherwise it is a
    temporary override expiring at the given datetime.
    """
    if until is None:
        payload = {"Value": temperature, "Status": "Hold", "NextTime": None}
    else:
        payload = {"Value": temperature,
                   "Status": "Temporary",
                   "NextTime": until.strftime('%Y-%m-%dT%H:%M:%SZ')}
    self._set_heat_setpoint(zone, payload)
|
Sets the temperature of the given zone.
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient/__init__.py#L266-L275
|
[
"def _set_heat_setpoint(self, zone, data):\n self._populate_full_data()\n\n device_id = self._get_device_id(zone)\n\n url = (self.hostname + \"/WebAPI/api/devices\"\n \"/%s/thermostat/changeableValues/heatSetpoint\" % device_id)\n\n response = self._do_request('put', url, json.dumps(data))\n\n task_id = self._get_task_id(response)\n\n while self._get_task_status(task_id) != 'Succeeded':\n time.sleep(1)\n"
] |
class EvohomeClient(object): # pylint: disable=useless-object-inheritance
"""Provides a client to access the Honeywell Evohome system"""
# pylint: disable=too-many-instance-attributes,too-many-arguments
def __init__(self, username, password, debug=False, user_data=None,
hostname="https://tccna.honeywell.com"):
"""Constructor. Takes the username and password for the service.
If user_data is given then this will be used to try and reduce
the number of calls to the authentication service which is known
to be rate limited.
"""
if debug is True:
_LOGGER.setLevel(logging.DEBUG)
_LOGGER.debug("Debug mode is explicitly enabled.")
requests_logger = logging.getLogger("requests.packages.urllib3")
requests_logger.setLevel(logging.DEBUG)
requests_logger.propagate = True
http_client.HTTPConnection.debuglevel = 1
else:
_LOGGER.debug(
"Debug mode is not explicitly enabled "
"(but may be enabled elsewhere)."
)
self.username = username
self.password = password
self.user_data = user_data
self.hostname = hostname
self.full_data = None
self.gateway_data = None
self.reader = codecs.getdecoder("utf-8")
self.location_id = ""
self.devices = {}
self.named_devices = {}
self.postdata = {}
self.headers = {}
def _convert(self, content):
return json.loads(self.reader(content)[0])
def _populate_full_data(self, force_refresh=False):
if self.full_data is None or force_refresh:
self._populate_user_info()
user_id = self.user_data['userInfo']['userID']
session_id = self.user_data['sessionId']
url = (self.hostname + "/WebAPI/api/locations"
"?userId=%s&allData=True" % user_id)
self.headers['sessionId'] = session_id
response = self._do_request('get', url, json.dumps(self.postdata))
self.full_data = self._convert(response.content)[0]
self.location_id = self.full_data['locationID']
self.devices = {}
self.named_devices = {}
for device in self.full_data['devices']:
self.devices[device['deviceID']] = device
self.named_devices[device['name']] = device
def _populate_user_info(self):
if self.user_data is None:
url = self.hostname + "/WebAPI/api/Session"
self.postdata = {'Username': self.username,
'Password': self.password,
'ApplicationId': '91db1612-73fd-4500-91b2-e63b069b185c'}
self.headers = {'content-type': 'application/json'}
response = self._do_request(
'post', url, data=json.dumps(self.postdata), retry=False)
self.user_data = self._convert(response.content)
return self.user_data
def temperatures(self, force_refresh=False):
"""Retrieve the current details for each zone. Returns a generator."""
self._populate_full_data(force_refresh)
for device in self.full_data['devices']:
set_point = 0
status = ""
if 'heatSetpoint' in device['thermostat']['changeableValues']:
set_point = float(
device['thermostat']['changeableValues']["heatSetpoint"]["value"])
status = device['thermostat']['changeableValues']["heatSetpoint"]["status"]
else:
status = device['thermostat']['changeableValues']['status']
yield {'thermostat': device['thermostatModelType'],
'id': device['deviceID'],
'name': device['name'],
'temp': float(device['thermostat']['indoorTemperature']),
'setpoint': set_point,
'status': status,
'mode': device['thermostat']['changeableValues']['mode']}
def get_modes(self, zone):
"""Returns the set of modes the device can be assigned."""
self._populate_full_data()
device = self._get_device(zone)
return device['thermostat']['allowedModes']
def _get_device(self, zone):
if isinstance(zone, str) or (IS_PY2 and isinstance(zone, basestring)): # noqa: F821; pylint: disable=undefined-variable
device = self.named_devices[zone]
else:
device = self.devices[zone]
return device
def _get_task_status(self, task_id):
self._populate_full_data()
url = (self.hostname + "/WebAPI/api/commTasks"
"?commTaskId=%s" % task_id)
response = self._do_request('get', url)
return self._convert(response.content)['state']
def _get_task_id(self, response):
ret = self._convert(response.content)
if isinstance(ret, list):
task_id = ret[0]['id']
else:
task_id = ret['id']
return task_id
def _do_request(self, method, url, data=None, retry=True):
if method == 'get':
func = requests.get
elif method == 'put':
func = requests.put
elif method == 'post':
func = requests.post
response = func(url, data=data, headers=self.headers)
# catch 401/unauthorized since we may retry
if response.status_code == requests.codes.unauthorized and retry: # pylint: disable=no-member
# Attempt to refresh sessionId if it has expired
if 'code' in response.text: # don't use response.json() here!
if response.json()[0]['code'] == "Unauthorized":
_LOGGER.debug("Session expired, re-authenticating...")
# Get a new sessionId
self.user_data = None
self._populate_user_info()
# Set headers with new sessionId
session_id = self.user_data['sessionId']
self.headers['sessionId'] = session_id
_LOGGER.debug("sessionId = %s", session_id)
response = func(url, data=data, headers=self.headers)
# display error message if the vendor provided one
if response.status_code != requests.codes.ok: # pylint: disable=no-member
if 'code' in response.text: # don't use response.json()!
message = ("HTTP Status = " + str(response.status_code) +
", Response = " + response.text)
raise requests.HTTPError(message)
response.raise_for_status()
return response
def _set_status(self, status, until=None):
self._populate_full_data()
url = (self.hostname + "/WebAPI/api/evoTouchSystems"
"?locationId=%s" % self.location_id)
if until is None:
data = {"QuickAction": status, "QuickActionNextTime": None}
else:
data = {
"QuickAction": status,
"QuickActionNextTime": "%sT00:00:00Z" % until.strftime('%Y-%m-%d')
}
response = self._do_request('put', url, json.dumps(data))
task_id = self._get_task_id(response)
while self._get_task_status(task_id) != 'Succeeded':
time.sleep(1)
def set_status_normal(self):
"""Sets the system to normal operation."""
self._set_status('Auto')
def set_status_custom(self, until=None):
"""Sets the system to the custom programme."""
self._set_status('Custom', until)
def set_status_eco(self, until=None):
"""Sets the system to the eco mode."""
self._set_status('AutoWithEco', until)
def set_status_away(self, until=None):
"""Sets the system to the away mode."""
self._set_status('Away', until)
def set_status_dayoff(self, until=None):
"""Sets the system to the day off mode."""
self._set_status('DayOff', until)
def set_status_heatingoff(self, until=None):
"""Sets the system to the heating off mode."""
self._set_status('HeatingOff', until)
def _get_device_id(self, zone):
device = self._get_device(zone)
return device['deviceID']
def _set_heat_setpoint(self, zone, data):
self._populate_full_data()
device_id = self._get_device_id(zone)
url = (self.hostname + "/WebAPI/api/devices"
"/%s/thermostat/changeableValues/heatSetpoint" % device_id)
response = self._do_request('put', url, json.dumps(data))
task_id = self._get_task_id(response)
while self._get_task_status(task_id) != 'Succeeded':
time.sleep(1)
def cancel_temp_override(self, zone):
"""Removes an existing temperature override."""
data = {"Value": None, "Status": "Scheduled", "NextTime": None}
self._set_heat_setpoint(zone, data)
def _get_dhw_zone(self):
for device in self.full_data['devices']:
if device['thermostatModelType'] == 'DOMESTIC_HOT_WATER':
return device['deviceID']
return None
def _set_dhw(self, status="Scheduled", mode=None, next_time=None):
"""Set DHW to On, Off or Auto, either indefinitely, or until a
specified time.
"""
data = {"Status": status,
"Mode": mode,
"NextTime": next_time,
"SpecialModes": None,
"HeatSetpoint": None,
"CoolSetpoint": None}
self._populate_full_data()
dhw_zone = self._get_dhw_zone()
if dhw_zone is None:
raise Exception('No DHW zone reported from API')
url = (self.hostname + "/WebAPI/api/devices"
"/%s/thermostat/changeableValues" % dhw_zone)
response = self._do_request('put', url, json.dumps(data))
task_id = self._get_task_id(response)
while self._get_task_status(task_id) != 'Succeeded':
time.sleep(1)
def set_dhw_on(self, until=None):
"""Set DHW to on, either indefinitely, or until a specified time.
When On, the DHW controller will work to keep its target temperature
at/above its target temperature. After the specified time, it will
revert to its scheduled behaviour.
"""
time_until = None if until is None else until.strftime(
'%Y-%m-%dT%H:%M:%SZ')
self._set_dhw(status="Hold", mode="DHWOn", next_time=time_until)
def set_dhw_off(self, until=None):
"""Set DHW to on, either indefinitely, or until a specified time.
When Off, the DHW controller will ignore its target temperature. After
the specified time, it will revert to its scheduled behaviour.
"""
time_until = None if until is None else until.strftime(
'%Y-%m-%dT%H:%M:%SZ')
self._set_dhw(status="Hold", mode="DHWOff", next_time=time_until)
def set_dhw_auto(self):
"""Set DHW to On or Off, according to its schedule."""
self._set_dhw(status="Scheduled")
|
watchforstock/evohome-client
|
evohomeclient/__init__.py
|
EvohomeClient._set_dhw
|
python
|
def _set_dhw(self, status="Scheduled", mode=None, next_time=None):
data = {"Status": status,
"Mode": mode,
"NextTime": next_time,
"SpecialModes": None,
"HeatSetpoint": None,
"CoolSetpoint": None}
self._populate_full_data()
dhw_zone = self._get_dhw_zone()
if dhw_zone is None:
raise Exception('No DHW zone reported from API')
url = (self.hostname + "/WebAPI/api/devices"
"/%s/thermostat/changeableValues" % dhw_zone)
response = self._do_request('put', url, json.dumps(data))
task_id = self._get_task_id(response)
while self._get_task_status(task_id) != 'Succeeded':
time.sleep(1)
|
Set DHW to On, Off or Auto, either indefinitely, or until a
specified time.
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient/__init__.py#L288-L311
|
[
"def _populate_full_data(self, force_refresh=False):\n if self.full_data is None or force_refresh:\n self._populate_user_info()\n\n user_id = self.user_data['userInfo']['userID']\n session_id = self.user_data['sessionId']\n\n url = (self.hostname + \"/WebAPI/api/locations\"\n \"?userId=%s&allData=True\" % user_id)\n self.headers['sessionId'] = session_id\n\n response = self._do_request('get', url, json.dumps(self.postdata))\n\n self.full_data = self._convert(response.content)[0]\n\n self.location_id = self.full_data['locationID']\n\n self.devices = {}\n self.named_devices = {}\n\n for device in self.full_data['devices']:\n self.devices[device['deviceID']] = device\n self.named_devices[device['name']] = device\n",
"def _get_task_status(self, task_id):\n self._populate_full_data()\n url = (self.hostname + \"/WebAPI/api/commTasks\"\n \"?commTaskId=%s\" % task_id)\n\n response = self._do_request('get', url)\n\n return self._convert(response.content)['state']\n",
"def _get_task_id(self, response):\n ret = self._convert(response.content)\n\n if isinstance(ret, list):\n task_id = ret[0]['id']\n else:\n task_id = ret['id']\n return task_id\n",
"def _do_request(self, method, url, data=None, retry=True):\n if method == 'get':\n func = requests.get\n elif method == 'put':\n func = requests.put\n elif method == 'post':\n func = requests.post\n\n response = func(url, data=data, headers=self.headers)\n\n # catch 401/unauthorized since we may retry\n if response.status_code == requests.codes.unauthorized and retry: # pylint: disable=no-member\n # Attempt to refresh sessionId if it has expired\n if 'code' in response.text: # don't use response.json() here!\n if response.json()[0]['code'] == \"Unauthorized\":\n _LOGGER.debug(\"Session expired, re-authenticating...\")\n # Get a new sessionId\n self.user_data = None\n self._populate_user_info()\n # Set headers with new sessionId\n session_id = self.user_data['sessionId']\n self.headers['sessionId'] = session_id\n _LOGGER.debug(\"sessionId = %s\", session_id)\n\n response = func(url, data=data, headers=self.headers)\n\n # display error message if the vendor provided one\n if response.status_code != requests.codes.ok: # pylint: disable=no-member\n if 'code' in response.text: # don't use response.json()!\n message = (\"HTTP Status = \" + str(response.status_code) +\n \", Response = \" + response.text)\n raise requests.HTTPError(message)\n\n response.raise_for_status()\n\n return response\n",
"def _get_dhw_zone(self):\n for device in self.full_data['devices']:\n if device['thermostatModelType'] == 'DOMESTIC_HOT_WATER':\n return device['deviceID']\n return None\n"
] |
class EvohomeClient(object): # pylint: disable=useless-object-inheritance
"""Provides a client to access the Honeywell Evohome system"""
# pylint: disable=too-many-instance-attributes,too-many-arguments
def __init__(self, username, password, debug=False, user_data=None,
hostname="https://tccna.honeywell.com"):
"""Constructor. Takes the username and password for the service.
If user_data is given then this will be used to try and reduce
the number of calls to the authentication service which is known
to be rate limited.
"""
if debug is True:
_LOGGER.setLevel(logging.DEBUG)
_LOGGER.debug("Debug mode is explicitly enabled.")
requests_logger = logging.getLogger("requests.packages.urllib3")
requests_logger.setLevel(logging.DEBUG)
requests_logger.propagate = True
http_client.HTTPConnection.debuglevel = 1
else:
_LOGGER.debug(
"Debug mode is not explicitly enabled "
"(but may be enabled elsewhere)."
)
self.username = username
self.password = password
self.user_data = user_data
self.hostname = hostname
self.full_data = None
self.gateway_data = None
self.reader = codecs.getdecoder("utf-8")
self.location_id = ""
self.devices = {}
self.named_devices = {}
self.postdata = {}
self.headers = {}
def _convert(self, content):
return json.loads(self.reader(content)[0])
def _populate_full_data(self, force_refresh=False):
if self.full_data is None or force_refresh:
self._populate_user_info()
user_id = self.user_data['userInfo']['userID']
session_id = self.user_data['sessionId']
url = (self.hostname + "/WebAPI/api/locations"
"?userId=%s&allData=True" % user_id)
self.headers['sessionId'] = session_id
response = self._do_request('get', url, json.dumps(self.postdata))
self.full_data = self._convert(response.content)[0]
self.location_id = self.full_data['locationID']
self.devices = {}
self.named_devices = {}
for device in self.full_data['devices']:
self.devices[device['deviceID']] = device
self.named_devices[device['name']] = device
def _populate_user_info(self):
if self.user_data is None:
url = self.hostname + "/WebAPI/api/Session"
self.postdata = {'Username': self.username,
'Password': self.password,
'ApplicationId': '91db1612-73fd-4500-91b2-e63b069b185c'}
self.headers = {'content-type': 'application/json'}
response = self._do_request(
'post', url, data=json.dumps(self.postdata), retry=False)
self.user_data = self._convert(response.content)
return self.user_data
def temperatures(self, force_refresh=False):
"""Retrieve the current details for each zone. Returns a generator."""
self._populate_full_data(force_refresh)
for device in self.full_data['devices']:
set_point = 0
status = ""
if 'heatSetpoint' in device['thermostat']['changeableValues']:
set_point = float(
device['thermostat']['changeableValues']["heatSetpoint"]["value"])
status = device['thermostat']['changeableValues']["heatSetpoint"]["status"]
else:
status = device['thermostat']['changeableValues']['status']
yield {'thermostat': device['thermostatModelType'],
'id': device['deviceID'],
'name': device['name'],
'temp': float(device['thermostat']['indoorTemperature']),
'setpoint': set_point,
'status': status,
'mode': device['thermostat']['changeableValues']['mode']}
def get_modes(self, zone):
"""Returns the set of modes the device can be assigned."""
self._populate_full_data()
device = self._get_device(zone)
return device['thermostat']['allowedModes']
def _get_device(self, zone):
if isinstance(zone, str) or (IS_PY2 and isinstance(zone, basestring)): # noqa: F821; pylint: disable=undefined-variable
device = self.named_devices[zone]
else:
device = self.devices[zone]
return device
def _get_task_status(self, task_id):
self._populate_full_data()
url = (self.hostname + "/WebAPI/api/commTasks"
"?commTaskId=%s" % task_id)
response = self._do_request('get', url)
return self._convert(response.content)['state']
def _get_task_id(self, response):
ret = self._convert(response.content)
if isinstance(ret, list):
task_id = ret[0]['id']
else:
task_id = ret['id']
return task_id
def _do_request(self, method, url, data=None, retry=True):
if method == 'get':
func = requests.get
elif method == 'put':
func = requests.put
elif method == 'post':
func = requests.post
response = func(url, data=data, headers=self.headers)
# catch 401/unauthorized since we may retry
if response.status_code == requests.codes.unauthorized and retry: # pylint: disable=no-member
# Attempt to refresh sessionId if it has expired
if 'code' in response.text: # don't use response.json() here!
if response.json()[0]['code'] == "Unauthorized":
_LOGGER.debug("Session expired, re-authenticating...")
# Get a new sessionId
self.user_data = None
self._populate_user_info()
# Set headers with new sessionId
session_id = self.user_data['sessionId']
self.headers['sessionId'] = session_id
_LOGGER.debug("sessionId = %s", session_id)
response = func(url, data=data, headers=self.headers)
# display error message if the vendor provided one
if response.status_code != requests.codes.ok: # pylint: disable=no-member
if 'code' in response.text: # don't use response.json()!
message = ("HTTP Status = " + str(response.status_code) +
", Response = " + response.text)
raise requests.HTTPError(message)
response.raise_for_status()
return response
def _set_status(self, status, until=None):
self._populate_full_data()
url = (self.hostname + "/WebAPI/api/evoTouchSystems"
"?locationId=%s" % self.location_id)
if until is None:
data = {"QuickAction": status, "QuickActionNextTime": None}
else:
data = {
"QuickAction": status,
"QuickActionNextTime": "%sT00:00:00Z" % until.strftime('%Y-%m-%d')
}
response = self._do_request('put', url, json.dumps(data))
task_id = self._get_task_id(response)
while self._get_task_status(task_id) != 'Succeeded':
time.sleep(1)
def set_status_normal(self):
"""Sets the system to normal operation."""
self._set_status('Auto')
def set_status_custom(self, until=None):
"""Sets the system to the custom programme."""
self._set_status('Custom', until)
def set_status_eco(self, until=None):
"""Sets the system to the eco mode."""
self._set_status('AutoWithEco', until)
def set_status_away(self, until=None):
"""Sets the system to the away mode."""
self._set_status('Away', until)
def set_status_dayoff(self, until=None):
"""Sets the system to the day off mode."""
self._set_status('DayOff', until)
def set_status_heatingoff(self, until=None):
"""Sets the system to the heating off mode."""
self._set_status('HeatingOff', until)
def _get_device_id(self, zone):
device = self._get_device(zone)
return device['deviceID']
def _set_heat_setpoint(self, zone, data):
self._populate_full_data()
device_id = self._get_device_id(zone)
url = (self.hostname + "/WebAPI/api/devices"
"/%s/thermostat/changeableValues/heatSetpoint" % device_id)
response = self._do_request('put', url, json.dumps(data))
task_id = self._get_task_id(response)
while self._get_task_status(task_id) != 'Succeeded':
time.sleep(1)
def set_temperature(self, zone, temperature, until=None):
"""Sets the temperature of the given zone."""
if until is None:
data = {"Value": temperature, "Status": "Hold", "NextTime": None}
else:
data = {"Value": temperature,
"Status": "Temporary",
"NextTime": until.strftime('%Y-%m-%dT%H:%M:%SZ')}
self._set_heat_setpoint(zone, data)
def cancel_temp_override(self, zone):
"""Removes an existing temperature override."""
data = {"Value": None, "Status": "Scheduled", "NextTime": None}
self._set_heat_setpoint(zone, data)
def _get_dhw_zone(self):
for device in self.full_data['devices']:
if device['thermostatModelType'] == 'DOMESTIC_HOT_WATER':
return device['deviceID']
return None
def set_dhw_on(self, until=None):
"""Set DHW to on, either indefinitely, or until a specified time.
When On, the DHW controller will work to keep its target temperature
at/above its target temperature. After the specified time, it will
revert to its scheduled behaviour.
"""
time_until = None if until is None else until.strftime(
'%Y-%m-%dT%H:%M:%SZ')
self._set_dhw(status="Hold", mode="DHWOn", next_time=time_until)
def set_dhw_off(self, until=None):
"""Set DHW to on, either indefinitely, or until a specified time.
When Off, the DHW controller will ignore its target temperature. After
the specified time, it will revert to its scheduled behaviour.
"""
time_until = None if until is None else until.strftime(
'%Y-%m-%dT%H:%M:%SZ')
self._set_dhw(status="Hold", mode="DHWOff", next_time=time_until)
def set_dhw_auto(self):
"""Set DHW to On or Off, according to its schedule."""
self._set_dhw(status="Scheduled")
|
watchforstock/evohome-client
|
evohomeclient/__init__.py
|
EvohomeClient.set_dhw_on
|
python
|
def set_dhw_on(self, until=None):
time_until = None if until is None else until.strftime(
'%Y-%m-%dT%H:%M:%SZ')
self._set_dhw(status="Hold", mode="DHWOn", next_time=time_until)
|
Set DHW to on, either indefinitely, or until a specified time.
When On, the DHW controller will work to keep its target temperature
at/above its target temperature. After the specified time, it will
revert to its scheduled behaviour.
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient/__init__.py#L313-L324
|
[
"def _set_dhw(self, status=\"Scheduled\", mode=None, next_time=None):\n \"\"\"Set DHW to On, Off or Auto, either indefinitely, or until a\n specified time.\n \"\"\"\n data = {\"Status\": status,\n \"Mode\": mode,\n \"NextTime\": next_time,\n \"SpecialModes\": None,\n \"HeatSetpoint\": None,\n \"CoolSetpoint\": None}\n\n self._populate_full_data()\n dhw_zone = self._get_dhw_zone()\n if dhw_zone is None:\n raise Exception('No DHW zone reported from API')\n url = (self.hostname + \"/WebAPI/api/devices\"\n \"/%s/thermostat/changeableValues\" % dhw_zone)\n\n response = self._do_request('put', url, json.dumps(data))\n\n task_id = self._get_task_id(response)\n\n while self._get_task_status(task_id) != 'Succeeded':\n time.sleep(1)\n"
] |
class EvohomeClient(object): # pylint: disable=useless-object-inheritance
"""Provides a client to access the Honeywell Evohome system"""
# pylint: disable=too-many-instance-attributes,too-many-arguments
def __init__(self, username, password, debug=False, user_data=None,
hostname="https://tccna.honeywell.com"):
"""Constructor. Takes the username and password for the service.
If user_data is given then this will be used to try and reduce
the number of calls to the authentication service which is known
to be rate limited.
"""
if debug is True:
_LOGGER.setLevel(logging.DEBUG)
_LOGGER.debug("Debug mode is explicitly enabled.")
requests_logger = logging.getLogger("requests.packages.urllib3")
requests_logger.setLevel(logging.DEBUG)
requests_logger.propagate = True
http_client.HTTPConnection.debuglevel = 1
else:
_LOGGER.debug(
"Debug mode is not explicitly enabled "
"(but may be enabled elsewhere)."
)
self.username = username
self.password = password
self.user_data = user_data
self.hostname = hostname
self.full_data = None
self.gateway_data = None
self.reader = codecs.getdecoder("utf-8")
self.location_id = ""
self.devices = {}
self.named_devices = {}
self.postdata = {}
self.headers = {}
def _convert(self, content):
return json.loads(self.reader(content)[0])
def _populate_full_data(self, force_refresh=False):
if self.full_data is None or force_refresh:
self._populate_user_info()
user_id = self.user_data['userInfo']['userID']
session_id = self.user_data['sessionId']
url = (self.hostname + "/WebAPI/api/locations"
"?userId=%s&allData=True" % user_id)
self.headers['sessionId'] = session_id
response = self._do_request('get', url, json.dumps(self.postdata))
self.full_data = self._convert(response.content)[0]
self.location_id = self.full_data['locationID']
self.devices = {}
self.named_devices = {}
for device in self.full_data['devices']:
self.devices[device['deviceID']] = device
self.named_devices[device['name']] = device
def _populate_user_info(self):
if self.user_data is None:
url = self.hostname + "/WebAPI/api/Session"
self.postdata = {'Username': self.username,
'Password': self.password,
'ApplicationId': '91db1612-73fd-4500-91b2-e63b069b185c'}
self.headers = {'content-type': 'application/json'}
response = self._do_request(
'post', url, data=json.dumps(self.postdata), retry=False)
self.user_data = self._convert(response.content)
return self.user_data
def temperatures(self, force_refresh=False):
"""Retrieve the current details for each zone. Returns a generator."""
self._populate_full_data(force_refresh)
for device in self.full_data['devices']:
set_point = 0
status = ""
if 'heatSetpoint' in device['thermostat']['changeableValues']:
set_point = float(
device['thermostat']['changeableValues']["heatSetpoint"]["value"])
status = device['thermostat']['changeableValues']["heatSetpoint"]["status"]
else:
status = device['thermostat']['changeableValues']['status']
yield {'thermostat': device['thermostatModelType'],
'id': device['deviceID'],
'name': device['name'],
'temp': float(device['thermostat']['indoorTemperature']),
'setpoint': set_point,
'status': status,
'mode': device['thermostat']['changeableValues']['mode']}
def get_modes(self, zone):
"""Returns the set of modes the device can be assigned."""
self._populate_full_data()
device = self._get_device(zone)
return device['thermostat']['allowedModes']
def _get_device(self, zone):
if isinstance(zone, str) or (IS_PY2 and isinstance(zone, basestring)): # noqa: F821; pylint: disable=undefined-variable
device = self.named_devices[zone]
else:
device = self.devices[zone]
return device
def _get_task_status(self, task_id):
self._populate_full_data()
url = (self.hostname + "/WebAPI/api/commTasks"
"?commTaskId=%s" % task_id)
response = self._do_request('get', url)
return self._convert(response.content)['state']
def _get_task_id(self, response):
ret = self._convert(response.content)
if isinstance(ret, list):
task_id = ret[0]['id']
else:
task_id = ret['id']
return task_id
def _do_request(self, method, url, data=None, retry=True):
if method == 'get':
func = requests.get
elif method == 'put':
func = requests.put
elif method == 'post':
func = requests.post
response = func(url, data=data, headers=self.headers)
# catch 401/unauthorized since we may retry
if response.status_code == requests.codes.unauthorized and retry: # pylint: disable=no-member
# Attempt to refresh sessionId if it has expired
if 'code' in response.text: # don't use response.json() here!
if response.json()[0]['code'] == "Unauthorized":
_LOGGER.debug("Session expired, re-authenticating...")
# Get a new sessionId
self.user_data = None
self._populate_user_info()
# Set headers with new sessionId
session_id = self.user_data['sessionId']
self.headers['sessionId'] = session_id
_LOGGER.debug("sessionId = %s", session_id)
response = func(url, data=data, headers=self.headers)
# display error message if the vendor provided one
if response.status_code != requests.codes.ok: # pylint: disable=no-member
if 'code' in response.text: # don't use response.json()!
message = ("HTTP Status = " + str(response.status_code) +
", Response = " + response.text)
raise requests.HTTPError(message)
response.raise_for_status()
return response
def _set_status(self, status, until=None):
self._populate_full_data()
url = (self.hostname + "/WebAPI/api/evoTouchSystems"
"?locationId=%s" % self.location_id)
if until is None:
data = {"QuickAction": status, "QuickActionNextTime": None}
else:
data = {
"QuickAction": status,
"QuickActionNextTime": "%sT00:00:00Z" % until.strftime('%Y-%m-%d')
}
response = self._do_request('put', url, json.dumps(data))
task_id = self._get_task_id(response)
while self._get_task_status(task_id) != 'Succeeded':
time.sleep(1)
def set_status_normal(self):
"""Sets the system to normal operation."""
self._set_status('Auto')
def set_status_custom(self, until=None):
"""Sets the system to the custom programme."""
self._set_status('Custom', until)
def set_status_eco(self, until=None):
"""Sets the system to the eco mode."""
self._set_status('AutoWithEco', until)
def set_status_away(self, until=None):
"""Sets the system to the away mode."""
self._set_status('Away', until)
def set_status_dayoff(self, until=None):
"""Sets the system to the day off mode."""
self._set_status('DayOff', until)
def set_status_heatingoff(self, until=None):
"""Sets the system to the heating off mode."""
self._set_status('HeatingOff', until)
def _get_device_id(self, zone):
device = self._get_device(zone)
return device['deviceID']
def _set_heat_setpoint(self, zone, data):
self._populate_full_data()
device_id = self._get_device_id(zone)
url = (self.hostname + "/WebAPI/api/devices"
"/%s/thermostat/changeableValues/heatSetpoint" % device_id)
response = self._do_request('put', url, json.dumps(data))
task_id = self._get_task_id(response)
while self._get_task_status(task_id) != 'Succeeded':
time.sleep(1)
def set_temperature(self, zone, temperature, until=None):
"""Sets the temperature of the given zone."""
if until is None:
data = {"Value": temperature, "Status": "Hold", "NextTime": None}
else:
data = {"Value": temperature,
"Status": "Temporary",
"NextTime": until.strftime('%Y-%m-%dT%H:%M:%SZ')}
self._set_heat_setpoint(zone, data)
def cancel_temp_override(self, zone):
"""Removes an existing temperature override."""
data = {"Value": None, "Status": "Scheduled", "NextTime": None}
self._set_heat_setpoint(zone, data)
def _get_dhw_zone(self):
for device in self.full_data['devices']:
if device['thermostatModelType'] == 'DOMESTIC_HOT_WATER':
return device['deviceID']
return None
def _set_dhw(self, status="Scheduled", mode=None, next_time=None):
"""Set DHW to On, Off or Auto, either indefinitely, or until a
specified time.
"""
data = {"Status": status,
"Mode": mode,
"NextTime": next_time,
"SpecialModes": None,
"HeatSetpoint": None,
"CoolSetpoint": None}
self._populate_full_data()
dhw_zone = self._get_dhw_zone()
if dhw_zone is None:
raise Exception('No DHW zone reported from API')
url = (self.hostname + "/WebAPI/api/devices"
"/%s/thermostat/changeableValues" % dhw_zone)
response = self._do_request('put', url, json.dumps(data))
task_id = self._get_task_id(response)
while self._get_task_status(task_id) != 'Succeeded':
time.sleep(1)
def set_dhw_off(self, until=None):
"""Set DHW to on, either indefinitely, or until a specified time.
When Off, the DHW controller will ignore its target temperature. After
the specified time, it will revert to its scheduled behaviour.
"""
time_until = None if until is None else until.strftime(
'%Y-%m-%dT%H:%M:%SZ')
self._set_dhw(status="Hold", mode="DHWOff", next_time=time_until)
def set_dhw_auto(self):
"""Set DHW to On or Off, according to its schedule."""
self._set_dhw(status="Scheduled")
|
watchforstock/evohome-client
|
evohomeclient2/__init__.py
|
EvohomeClient._headers
|
python
|
def _headers(self):
if not self.access_token or not self.access_token_expires:
self._basic_login()
elif datetime.now() > self.access_token_expires - timedelta(seconds=30):
self._basic_login()
return {'Accept': HEADER_ACCEPT,
'Authorization': 'bearer ' + self.access_token}
|
Ensure the Authorization Header has a valid Access Token.
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/__init__.py#L84-L93
|
[
"def _basic_login(self):\n \"\"\"Obtain a new access token from the vendor.\n\n First, try using the refresh_token, if one is available, otherwise\n authenticate using the user credentials.\n \"\"\"\n _LOGGER.debug(\"No/Expired/Invalid access_token, re-authenticating...\")\n self.access_token = self.access_token_expires = None\n\n if self.refresh_token:\n _LOGGER.debug(\"Trying refresh_token...\")\n credentials = {'grant_type': \"refresh_token\",\n 'scope': \"EMEA-V1-Basic EMEA-V1-Anonymous\",\n 'refresh_token': self.refresh_token}\n\n try:\n self._obtain_access_token(credentials)\n except (requests.HTTPError, KeyError, ValueError):\n _LOGGER.warning(\n \"Invalid refresh_token, will try user credentials.\")\n self.refresh_token = None\n\n if not self.refresh_token:\n _LOGGER.debug(\"Trying user credentials...\")\n credentials = {'grant_type': \"password\",\n 'scope': \"EMEA-V1-Basic EMEA-V1-Anonymous \"\n \"EMEA-V1-Get-Current-User-Account\",\n 'Username': self.username,\n 'Password': self.password}\n\n self._obtain_access_token(credentials)\n\n _LOGGER.debug(\"refresh_token = %s\", self.refresh_token)\n _LOGGER.debug(\"access_token = %s\", self.access_token)\n _LOGGER.debug(\"access_token_expires = %s\",\n self.access_token_expires.strftime(\"%Y-%m-%d %H:%M:%S\"))\n"
] |
class EvohomeClient(object): # pylint: disable=too-many-instance-attributes,useless-object-inheritance
"""Provides access to the v2 Evohome API."""
def __init__(self, username, password, debug=False, refresh_token=None, # pylint: disable=too-many-arguments
access_token=None, access_token_expires=None):
"""Construct the EvohomeClient object."""
if debug is True:
_LOGGER.setLevel(logging.DEBUG)
_LOGGER.debug("Debug mode is explicitly enabled.")
requests_logger = logging.getLogger("requests.packages.urllib3")
requests_logger.setLevel(logging.DEBUG)
requests_logger.propagate = True
http_client.HTTPConnection.debuglevel = 1
else:
_LOGGER.debug(
"Debug mode is not explicitly enabled "
"(but may be enabled elsewhere)."
)
self.username = username
self.password = password
self.refresh_token = refresh_token
self.access_token = access_token
self.access_token_expires = access_token_expires
self.account_info = None
self.locations = None
self.installation_info = None
self.system_id = None
self._login()
def _login(self):
self.user_account()
self.installation()
def _basic_login(self):
"""Obtain a new access token from the vendor.
First, try using the refresh_token, if one is available, otherwise
authenticate using the user credentials.
"""
_LOGGER.debug("No/Expired/Invalid access_token, re-authenticating...")
self.access_token = self.access_token_expires = None
if self.refresh_token:
_LOGGER.debug("Trying refresh_token...")
credentials = {'grant_type': "refresh_token",
'scope': "EMEA-V1-Basic EMEA-V1-Anonymous",
'refresh_token': self.refresh_token}
try:
self._obtain_access_token(credentials)
except (requests.HTTPError, KeyError, ValueError):
_LOGGER.warning(
"Invalid refresh_token, will try user credentials.")
self.refresh_token = None
if not self.refresh_token:
_LOGGER.debug("Trying user credentials...")
credentials = {'grant_type': "password",
'scope': "EMEA-V1-Basic EMEA-V1-Anonymous "
"EMEA-V1-Get-Current-User-Account",
'Username': self.username,
'Password': self.password}
self._obtain_access_token(credentials)
_LOGGER.debug("refresh_token = %s", self.refresh_token)
_LOGGER.debug("access_token = %s", self.access_token)
_LOGGER.debug("access_token_expires = %s",
self.access_token_expires.strftime("%Y-%m-%d %H:%M:%S"))
def _obtain_access_token(self, credentials):
url = 'https://tccna.honeywell.com/Auth/OAuth/Token'
payload = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
'Host': 'rs.alarmnet.com/',
'Cache-Control': 'no-store no-cache',
'Pragma': 'no-cache',
'Connection': 'Keep-Alive'
}
payload.update(credentials) # merge the credentials into the payload
response = requests.post(url, data=payload, headers=HEADER_BASIC_AUTH)
try:
response.raise_for_status()
except requests.HTTPError:
msg = "Unable to obtain an Access Token"
if response.text: # if there is a message, then raise with it
msg = msg + ", hint: " + response.text
raise AuthenticationError(msg)
try: # the access token _should_ be valid...
# this may cause a ValueError
response_json = response.json()
# these may cause a KeyError
self.access_token = response_json['access_token']
self.access_token_expires = (
datetime.now() +
timedelta(seconds=response_json['expires_in'])
)
self.refresh_token = response_json['refresh_token']
except KeyError:
raise AuthenticationError("Unable to obtain an Access Token, "
"hint: " + response_json)
except ValueError:
raise AuthenticationError("Unable to obtain an Access Token, "
"hint: " + response.text)
def _get_location(self, location):
if location is None:
return self.installation_info[0]['locationInfo']['locationId']
return location
def _get_single_heating_system(self):
# This allows a shortcut for some systems
if len(self.locations) != 1:
raise Exception("More (or less) than one location available")
if len(self.locations[0]._gateways) != 1: # pylint: disable=protected-access
raise Exception("More (or less) than one gateway available")
if len(self.locations[0]._gateways[0]._control_systems) != 1: # pylint: disable=protected-access
raise Exception("More (or less) than one control system available")
return self.locations[0]._gateways[0]._control_systems[0] # pylint: disable=protected-access
def user_account(self):
"""Return the user account information."""
self.account_info = None
url = 'https://tccna.honeywell.com/WebAPI/emea/api/v1/userAccount'
response = requests.get(url, headers=self._headers())
response.raise_for_status()
self.account_info = response.json()
return self.account_info
def installation(self):
"""Return the details of the installation."""
self.locations = []
url = ("https://tccna.honeywell.com/WebAPI/emea/api/v1/location"
"/installationInfo?userId=%s"
"&includeTemperatureControlSystems=True"
% self.account_info['userId'])
response = requests.get(url, headers=self._headers())
response.raise_for_status()
self.installation_info = response.json()
self.system_id = (self.installation_info[0]['gateways'][0]
['temperatureControlSystems'][0]['systemId'])
for loc_data in self.installation_info:
self.locations.append(Location(self, loc_data))
return self.installation_info
def full_installation(self, location=None):
"""Return the full details of the installation."""
url = ("https://tccna.honeywell.com/WebAPI/emea/api/v1/location"
"/%s/installationInfo?includeTemperatureControlSystems=True"
% self._get_location(location))
response = requests.get(url, headers=self._headers())
response.raise_for_status()
return response.json()
def gateway(self):
"""Return the detail of the gateway."""
url = 'https://tccna.honeywell.com/WebAPI/emea/api/v1/gateway'
response = requests.get(url, headers=self._headers())
response.raise_for_status()
return response.json()
def set_status_normal(self):
"""Set the system into normal heating mode."""
return self._get_single_heating_system().set_status_normal()
def set_status_reset(self):
"""Reset the system mode."""
return self._get_single_heating_system().set_status_reset()
def set_status_custom(self, until=None):
"""Set the system into custom heating mode."""
return self._get_single_heating_system().set_status_custom(until)
def set_status_eco(self, until=None):
"""Set the system into eco heating mode."""
return self._get_single_heating_system().set_status_eco(until)
def set_status_away(self, until=None):
"""Set the system into away heating mode."""
return self._get_single_heating_system().set_status_away(until)
def set_status_dayoff(self, until=None):
"""Set the system into day off heating mode."""
return self._get_single_heating_system().set_status_dayoff(until)
def set_status_heatingoff(self, until=None):
"""Set the system into heating off heating mode."""
return self._get_single_heating_system().set_status_heatingoff(until)
def temperatures(self):
"""Return the current zone temperatures and set points."""
return self._get_single_heating_system().temperatures()
def zone_schedules_backup(self, filename):
"""Back up the current system configuration to the given file."""
return self._get_single_heating_system().zone_schedules_backup(filename)
def zone_schedules_restore(self, filename):
"""Restore the current system configuration from the given file."""
return self._get_single_heating_system().zone_schedules_restore(filename)
|
watchforstock/evohome-client
|
evohomeclient2/__init__.py
|
EvohomeClient._basic_login
|
python
|
def _basic_login(self):
_LOGGER.debug("No/Expired/Invalid access_token, re-authenticating...")
self.access_token = self.access_token_expires = None
if self.refresh_token:
_LOGGER.debug("Trying refresh_token...")
credentials = {'grant_type': "refresh_token",
'scope': "EMEA-V1-Basic EMEA-V1-Anonymous",
'refresh_token': self.refresh_token}
try:
self._obtain_access_token(credentials)
except (requests.HTTPError, KeyError, ValueError):
_LOGGER.warning(
"Invalid refresh_token, will try user credentials.")
self.refresh_token = None
if not self.refresh_token:
_LOGGER.debug("Trying user credentials...")
credentials = {'grant_type': "password",
'scope': "EMEA-V1-Basic EMEA-V1-Anonymous "
"EMEA-V1-Get-Current-User-Account",
'Username': self.username,
'Password': self.password}
self._obtain_access_token(credentials)
_LOGGER.debug("refresh_token = %s", self.refresh_token)
_LOGGER.debug("access_token = %s", self.access_token)
_LOGGER.debug("access_token_expires = %s",
self.access_token_expires.strftime("%Y-%m-%d %H:%M:%S"))
|
Obtain a new access token from the vendor.
First, try using the refresh_token, if one is available, otherwise
authenticate using the user credentials.
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/__init__.py#L95-L130
| null |
class EvohomeClient(object): # pylint: disable=too-many-instance-attributes,useless-object-inheritance
"""Provides access to the v2 Evohome API."""
def __init__(self, username, password, debug=False, refresh_token=None, # pylint: disable=too-many-arguments
access_token=None, access_token_expires=None):
"""Construct the EvohomeClient object."""
if debug is True:
_LOGGER.setLevel(logging.DEBUG)
_LOGGER.debug("Debug mode is explicitly enabled.")
requests_logger = logging.getLogger("requests.packages.urllib3")
requests_logger.setLevel(logging.DEBUG)
requests_logger.propagate = True
http_client.HTTPConnection.debuglevel = 1
else:
_LOGGER.debug(
"Debug mode is not explicitly enabled "
"(but may be enabled elsewhere)."
)
self.username = username
self.password = password
self.refresh_token = refresh_token
self.access_token = access_token
self.access_token_expires = access_token_expires
self.account_info = None
self.locations = None
self.installation_info = None
self.system_id = None
self._login()
def _login(self):
self.user_account()
self.installation()
def _headers(self):
"""Ensure the Authorization Header has a valid Access Token."""
if not self.access_token or not self.access_token_expires:
self._basic_login()
elif datetime.now() > self.access_token_expires - timedelta(seconds=30):
self._basic_login()
return {'Accept': HEADER_ACCEPT,
'Authorization': 'bearer ' + self.access_token}
def _obtain_access_token(self, credentials):
url = 'https://tccna.honeywell.com/Auth/OAuth/Token'
payload = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
'Host': 'rs.alarmnet.com/',
'Cache-Control': 'no-store no-cache',
'Pragma': 'no-cache',
'Connection': 'Keep-Alive'
}
payload.update(credentials) # merge the credentials into the payload
response = requests.post(url, data=payload, headers=HEADER_BASIC_AUTH)
try:
response.raise_for_status()
except requests.HTTPError:
msg = "Unable to obtain an Access Token"
if response.text: # if there is a message, then raise with it
msg = msg + ", hint: " + response.text
raise AuthenticationError(msg)
try: # the access token _should_ be valid...
# this may cause a ValueError
response_json = response.json()
# these may cause a KeyError
self.access_token = response_json['access_token']
self.access_token_expires = (
datetime.now() +
timedelta(seconds=response_json['expires_in'])
)
self.refresh_token = response_json['refresh_token']
except KeyError:
raise AuthenticationError("Unable to obtain an Access Token, "
"hint: " + response_json)
except ValueError:
raise AuthenticationError("Unable to obtain an Access Token, "
"hint: " + response.text)
def _get_location(self, location):
if location is None:
return self.installation_info[0]['locationInfo']['locationId']
return location
def _get_single_heating_system(self):
# This allows a shortcut for some systems
if len(self.locations) != 1:
raise Exception("More (or less) than one location available")
if len(self.locations[0]._gateways) != 1: # pylint: disable=protected-access
raise Exception("More (or less) than one gateway available")
if len(self.locations[0]._gateways[0]._control_systems) != 1: # pylint: disable=protected-access
raise Exception("More (or less) than one control system available")
return self.locations[0]._gateways[0]._control_systems[0] # pylint: disable=protected-access
def user_account(self):
"""Return the user account information."""
self.account_info = None
url = 'https://tccna.honeywell.com/WebAPI/emea/api/v1/userAccount'
response = requests.get(url, headers=self._headers())
response.raise_for_status()
self.account_info = response.json()
return self.account_info
def installation(self):
"""Return the details of the installation."""
self.locations = []
url = ("https://tccna.honeywell.com/WebAPI/emea/api/v1/location"
"/installationInfo?userId=%s"
"&includeTemperatureControlSystems=True"
% self.account_info['userId'])
response = requests.get(url, headers=self._headers())
response.raise_for_status()
self.installation_info = response.json()
self.system_id = (self.installation_info[0]['gateways'][0]
['temperatureControlSystems'][0]['systemId'])
for loc_data in self.installation_info:
self.locations.append(Location(self, loc_data))
return self.installation_info
def full_installation(self, location=None):
"""Return the full details of the installation."""
url = ("https://tccna.honeywell.com/WebAPI/emea/api/v1/location"
"/%s/installationInfo?includeTemperatureControlSystems=True"
% self._get_location(location))
response = requests.get(url, headers=self._headers())
response.raise_for_status()
return response.json()
def gateway(self):
"""Return the detail of the gateway."""
url = 'https://tccna.honeywell.com/WebAPI/emea/api/v1/gateway'
response = requests.get(url, headers=self._headers())
response.raise_for_status()
return response.json()
def set_status_normal(self):
"""Set the system into normal heating mode."""
return self._get_single_heating_system().set_status_normal()
def set_status_reset(self):
"""Reset the system mode."""
return self._get_single_heating_system().set_status_reset()
def set_status_custom(self, until=None):
"""Set the system into custom heating mode."""
return self._get_single_heating_system().set_status_custom(until)
def set_status_eco(self, until=None):
"""Set the system into eco heating mode."""
return self._get_single_heating_system().set_status_eco(until)
def set_status_away(self, until=None):
"""Set the system into away heating mode."""
return self._get_single_heating_system().set_status_away(until)
def set_status_dayoff(self, until=None):
"""Set the system into day off heating mode."""
return self._get_single_heating_system().set_status_dayoff(until)
def set_status_heatingoff(self, until=None):
"""Set the system into heating off heating mode."""
return self._get_single_heating_system().set_status_heatingoff(until)
def temperatures(self):
"""Return the current zone temperatures and set points."""
return self._get_single_heating_system().temperatures()
def zone_schedules_backup(self, filename):
"""Back up the current system configuration to the given file."""
return self._get_single_heating_system().zone_schedules_backup(filename)
def zone_schedules_restore(self, filename):
"""Restore the current system configuration from the given file."""
return self._get_single_heating_system().zone_schedules_restore(filename)
|
watchforstock/evohome-client
|
evohomeclient2/__init__.py
|
EvohomeClient.user_account
|
python
|
def user_account(self):
self.account_info = None
url = 'https://tccna.honeywell.com/WebAPI/emea/api/v1/userAccount'
response = requests.get(url, headers=self._headers())
response.raise_for_status()
self.account_info = response.json()
return self.account_info
|
Return the user account information.
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/__init__.py#L192-L202
|
[
"def _headers(self):\n \"\"\"Ensure the Authorization Header has a valid Access Token.\"\"\"\n if not self.access_token or not self.access_token_expires:\n self._basic_login()\n\n elif datetime.now() > self.access_token_expires - timedelta(seconds=30):\n self._basic_login()\n\n return {'Accept': HEADER_ACCEPT,\n 'Authorization': 'bearer ' + self.access_token}\n"
] |
class EvohomeClient(object): # pylint: disable=too-many-instance-attributes,useless-object-inheritance
"""Provides access to the v2 Evohome API."""
def __init__(self, username, password, debug=False, refresh_token=None, # pylint: disable=too-many-arguments
access_token=None, access_token_expires=None):
"""Construct the EvohomeClient object."""
if debug is True:
_LOGGER.setLevel(logging.DEBUG)
_LOGGER.debug("Debug mode is explicitly enabled.")
requests_logger = logging.getLogger("requests.packages.urllib3")
requests_logger.setLevel(logging.DEBUG)
requests_logger.propagate = True
http_client.HTTPConnection.debuglevel = 1
else:
_LOGGER.debug(
"Debug mode is not explicitly enabled "
"(but may be enabled elsewhere)."
)
self.username = username
self.password = password
self.refresh_token = refresh_token
self.access_token = access_token
self.access_token_expires = access_token_expires
self.account_info = None
self.locations = None
self.installation_info = None
self.system_id = None
self._login()
def _login(self):
self.user_account()
self.installation()
def _headers(self):
"""Ensure the Authorization Header has a valid Access Token."""
if not self.access_token or not self.access_token_expires:
self._basic_login()
elif datetime.now() > self.access_token_expires - timedelta(seconds=30):
self._basic_login()
return {'Accept': HEADER_ACCEPT,
'Authorization': 'bearer ' + self.access_token}
def _basic_login(self):
"""Obtain a new access token from the vendor.
First, try using the refresh_token, if one is available, otherwise
authenticate using the user credentials.
"""
_LOGGER.debug("No/Expired/Invalid access_token, re-authenticating...")
self.access_token = self.access_token_expires = None
if self.refresh_token:
_LOGGER.debug("Trying refresh_token...")
credentials = {'grant_type': "refresh_token",
'scope': "EMEA-V1-Basic EMEA-V1-Anonymous",
'refresh_token': self.refresh_token}
try:
self._obtain_access_token(credentials)
except (requests.HTTPError, KeyError, ValueError):
_LOGGER.warning(
"Invalid refresh_token, will try user credentials.")
self.refresh_token = None
if not self.refresh_token:
_LOGGER.debug("Trying user credentials...")
credentials = {'grant_type': "password",
'scope': "EMEA-V1-Basic EMEA-V1-Anonymous "
"EMEA-V1-Get-Current-User-Account",
'Username': self.username,
'Password': self.password}
self._obtain_access_token(credentials)
_LOGGER.debug("refresh_token = %s", self.refresh_token)
_LOGGER.debug("access_token = %s", self.access_token)
_LOGGER.debug("access_token_expires = %s",
self.access_token_expires.strftime("%Y-%m-%d %H:%M:%S"))
def _obtain_access_token(self, credentials):
url = 'https://tccna.honeywell.com/Auth/OAuth/Token'
payload = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
'Host': 'rs.alarmnet.com/',
'Cache-Control': 'no-store no-cache',
'Pragma': 'no-cache',
'Connection': 'Keep-Alive'
}
payload.update(credentials) # merge the credentials into the payload
response = requests.post(url, data=payload, headers=HEADER_BASIC_AUTH)
try:
response.raise_for_status()
except requests.HTTPError:
msg = "Unable to obtain an Access Token"
if response.text: # if there is a message, then raise with it
msg = msg + ", hint: " + response.text
raise AuthenticationError(msg)
try: # the access token _should_ be valid...
# this may cause a ValueError
response_json = response.json()
# these may cause a KeyError
self.access_token = response_json['access_token']
self.access_token_expires = (
datetime.now() +
timedelta(seconds=response_json['expires_in'])
)
self.refresh_token = response_json['refresh_token']
except KeyError:
raise AuthenticationError("Unable to obtain an Access Token, "
"hint: " + response_json)
except ValueError:
raise AuthenticationError("Unable to obtain an Access Token, "
"hint: " + response.text)
def _get_location(self, location):
if location is None:
return self.installation_info[0]['locationInfo']['locationId']
return location
def _get_single_heating_system(self):
# This allows a shortcut for some systems
if len(self.locations) != 1:
raise Exception("More (or less) than one location available")
if len(self.locations[0]._gateways) != 1: # pylint: disable=protected-access
raise Exception("More (or less) than one gateway available")
if len(self.locations[0]._gateways[0]._control_systems) != 1: # pylint: disable=protected-access
raise Exception("More (or less) than one control system available")
return self.locations[0]._gateways[0]._control_systems[0] # pylint: disable=protected-access
def installation(self):
"""Return the details of the installation."""
self.locations = []
url = ("https://tccna.honeywell.com/WebAPI/emea/api/v1/location"
"/installationInfo?userId=%s"
"&includeTemperatureControlSystems=True"
% self.account_info['userId'])
response = requests.get(url, headers=self._headers())
response.raise_for_status()
self.installation_info = response.json()
self.system_id = (self.installation_info[0]['gateways'][0]
['temperatureControlSystems'][0]['systemId'])
for loc_data in self.installation_info:
self.locations.append(Location(self, loc_data))
return self.installation_info
def full_installation(self, location=None):
"""Return the full details of the installation."""
url = ("https://tccna.honeywell.com/WebAPI/emea/api/v1/location"
"/%s/installationInfo?includeTemperatureControlSystems=True"
% self._get_location(location))
response = requests.get(url, headers=self._headers())
response.raise_for_status()
return response.json()
def gateway(self):
"""Return the detail of the gateway."""
url = 'https://tccna.honeywell.com/WebAPI/emea/api/v1/gateway'
response = requests.get(url, headers=self._headers())
response.raise_for_status()
return response.json()
def set_status_normal(self):
"""Set the system into normal heating mode."""
return self._get_single_heating_system().set_status_normal()
def set_status_reset(self):
"""Reset the system mode."""
return self._get_single_heating_system().set_status_reset()
def set_status_custom(self, until=None):
"""Set the system into custom heating mode."""
return self._get_single_heating_system().set_status_custom(until)
def set_status_eco(self, until=None):
"""Set the system into eco heating mode."""
return self._get_single_heating_system().set_status_eco(until)
def set_status_away(self, until=None):
"""Set the system into away heating mode."""
return self._get_single_heating_system().set_status_away(until)
def set_status_dayoff(self, until=None):
"""Set the system into day off heating mode."""
return self._get_single_heating_system().set_status_dayoff(until)
def set_status_heatingoff(self, until=None):
"""Set the system into heating off heating mode."""
return self._get_single_heating_system().set_status_heatingoff(until)
def temperatures(self):
"""Return the current zone temperatures and set points."""
return self._get_single_heating_system().temperatures()
def zone_schedules_backup(self, filename):
"""Back up the current system configuration to the given file."""
return self._get_single_heating_system().zone_schedules_backup(filename)
def zone_schedules_restore(self, filename):
"""Restore the current system configuration from the given file."""
return self._get_single_heating_system().zone_schedules_restore(filename)
|
watchforstock/evohome-client
|
evohomeclient2/__init__.py
|
EvohomeClient.installation
|
python
|
def installation(self):
self.locations = []
url = ("https://tccna.honeywell.com/WebAPI/emea/api/v1/location"
"/installationInfo?userId=%s"
"&includeTemperatureControlSystems=True"
% self.account_info['userId'])
response = requests.get(url, headers=self._headers())
response.raise_for_status()
self.installation_info = response.json()
self.system_id = (self.installation_info[0]['gateways'][0]
['temperatureControlSystems'][0]['systemId'])
for loc_data in self.installation_info:
self.locations.append(Location(self, loc_data))
return self.installation_info
|
Return the details of the installation.
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/__init__.py#L204-L223
|
[
"def _headers(self):\n \"\"\"Ensure the Authorization Header has a valid Access Token.\"\"\"\n if not self.access_token or not self.access_token_expires:\n self._basic_login()\n\n elif datetime.now() > self.access_token_expires - timedelta(seconds=30):\n self._basic_login()\n\n return {'Accept': HEADER_ACCEPT,\n 'Authorization': 'bearer ' + self.access_token}\n"
] |
class EvohomeClient(object): # pylint: disable=too-many-instance-attributes,useless-object-inheritance
"""Provides access to the v2 Evohome API."""
def __init__(self, username, password, debug=False, refresh_token=None, # pylint: disable=too-many-arguments
access_token=None, access_token_expires=None):
"""Construct the EvohomeClient object."""
if debug is True:
_LOGGER.setLevel(logging.DEBUG)
_LOGGER.debug("Debug mode is explicitly enabled.")
requests_logger = logging.getLogger("requests.packages.urllib3")
requests_logger.setLevel(logging.DEBUG)
requests_logger.propagate = True
http_client.HTTPConnection.debuglevel = 1
else:
_LOGGER.debug(
"Debug mode is not explicitly enabled "
"(but may be enabled elsewhere)."
)
self.username = username
self.password = password
self.refresh_token = refresh_token
self.access_token = access_token
self.access_token_expires = access_token_expires
self.account_info = None
self.locations = None
self.installation_info = None
self.system_id = None
self._login()
def _login(self):
self.user_account()
self.installation()
def _headers(self):
"""Ensure the Authorization Header has a valid Access Token."""
if not self.access_token or not self.access_token_expires:
self._basic_login()
elif datetime.now() > self.access_token_expires - timedelta(seconds=30):
self._basic_login()
return {'Accept': HEADER_ACCEPT,
'Authorization': 'bearer ' + self.access_token}
def _basic_login(self):
"""Obtain a new access token from the vendor.
First, try using the refresh_token, if one is available, otherwise
authenticate using the user credentials.
"""
_LOGGER.debug("No/Expired/Invalid access_token, re-authenticating...")
self.access_token = self.access_token_expires = None
if self.refresh_token:
_LOGGER.debug("Trying refresh_token...")
credentials = {'grant_type': "refresh_token",
'scope': "EMEA-V1-Basic EMEA-V1-Anonymous",
'refresh_token': self.refresh_token}
try:
self._obtain_access_token(credentials)
except (requests.HTTPError, KeyError, ValueError):
_LOGGER.warning(
"Invalid refresh_token, will try user credentials.")
self.refresh_token = None
if not self.refresh_token:
_LOGGER.debug("Trying user credentials...")
credentials = {'grant_type': "password",
'scope': "EMEA-V1-Basic EMEA-V1-Anonymous "
"EMEA-V1-Get-Current-User-Account",
'Username': self.username,
'Password': self.password}
self._obtain_access_token(credentials)
_LOGGER.debug("refresh_token = %s", self.refresh_token)
_LOGGER.debug("access_token = %s", self.access_token)
_LOGGER.debug("access_token_expires = %s",
self.access_token_expires.strftime("%Y-%m-%d %H:%M:%S"))
def _obtain_access_token(self, credentials):
url = 'https://tccna.honeywell.com/Auth/OAuth/Token'
payload = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
'Host': 'rs.alarmnet.com/',
'Cache-Control': 'no-store no-cache',
'Pragma': 'no-cache',
'Connection': 'Keep-Alive'
}
payload.update(credentials) # merge the credentials into the payload
response = requests.post(url, data=payload, headers=HEADER_BASIC_AUTH)
try:
response.raise_for_status()
except requests.HTTPError:
msg = "Unable to obtain an Access Token"
if response.text: # if there is a message, then raise with it
msg = msg + ", hint: " + response.text
raise AuthenticationError(msg)
try: # the access token _should_ be valid...
# this may cause a ValueError
response_json = response.json()
# these may cause a KeyError
self.access_token = response_json['access_token']
self.access_token_expires = (
datetime.now() +
timedelta(seconds=response_json['expires_in'])
)
self.refresh_token = response_json['refresh_token']
except KeyError:
raise AuthenticationError("Unable to obtain an Access Token, "
"hint: " + response_json)
except ValueError:
raise AuthenticationError("Unable to obtain an Access Token, "
"hint: " + response.text)
def _get_location(self, location):
if location is None:
return self.installation_info[0]['locationInfo']['locationId']
return location
def _get_single_heating_system(self):
# This allows a shortcut for some systems
if len(self.locations) != 1:
raise Exception("More (or less) than one location available")
if len(self.locations[0]._gateways) != 1: # pylint: disable=protected-access
raise Exception("More (or less) than one gateway available")
if len(self.locations[0]._gateways[0]._control_systems) != 1: # pylint: disable=protected-access
raise Exception("More (or less) than one control system available")
return self.locations[0]._gateways[0]._control_systems[0] # pylint: disable=protected-access
def user_account(self):
"""Return the user account information."""
self.account_info = None
url = 'https://tccna.honeywell.com/WebAPI/emea/api/v1/userAccount'
response = requests.get(url, headers=self._headers())
response.raise_for_status()
self.account_info = response.json()
return self.account_info
def full_installation(self, location=None):
"""Return the full details of the installation."""
url = ("https://tccna.honeywell.com/WebAPI/emea/api/v1/location"
"/%s/installationInfo?includeTemperatureControlSystems=True"
% self._get_location(location))
response = requests.get(url, headers=self._headers())
response.raise_for_status()
return response.json()
def gateway(self):
"""Return the detail of the gateway."""
url = 'https://tccna.honeywell.com/WebAPI/emea/api/v1/gateway'
response = requests.get(url, headers=self._headers())
response.raise_for_status()
return response.json()
def set_status_normal(self):
"""Set the system into normal heating mode."""
return self._get_single_heating_system().set_status_normal()
def set_status_reset(self):
"""Reset the system mode."""
return self._get_single_heating_system().set_status_reset()
def set_status_custom(self, until=None):
"""Set the system into custom heating mode."""
return self._get_single_heating_system().set_status_custom(until)
def set_status_eco(self, until=None):
"""Set the system into eco heating mode."""
return self._get_single_heating_system().set_status_eco(until)
def set_status_away(self, until=None):
"""Set the system into away heating mode."""
return self._get_single_heating_system().set_status_away(until)
def set_status_dayoff(self, until=None):
"""Set the system into day off heating mode."""
return self._get_single_heating_system().set_status_dayoff(until)
def set_status_heatingoff(self, until=None):
"""Set the system into heating off heating mode."""
return self._get_single_heating_system().set_status_heatingoff(until)
def temperatures(self):
"""Return the current zone temperatures and set points."""
return self._get_single_heating_system().temperatures()
def zone_schedules_backup(self, filename):
"""Back up the current system configuration to the given file."""
return self._get_single_heating_system().zone_schedules_backup(filename)
def zone_schedules_restore(self, filename):
"""Restore the current system configuration from the given file."""
return self._get_single_heating_system().zone_schedules_restore(filename)
|
watchforstock/evohome-client
|
evohomeclient2/__init__.py
|
EvohomeClient.full_installation
|
python
|
def full_installation(self, location=None):
url = ("https://tccna.honeywell.com/WebAPI/emea/api/v1/location"
"/%s/installationInfo?includeTemperatureControlSystems=True"
% self._get_location(location))
response = requests.get(url, headers=self._headers())
response.raise_for_status()
return response.json()
|
Return the full details of the installation.
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/__init__.py#L225-L234
|
[
"def _headers(self):\n \"\"\"Ensure the Authorization Header has a valid Access Token.\"\"\"\n if not self.access_token or not self.access_token_expires:\n self._basic_login()\n\n elif datetime.now() > self.access_token_expires - timedelta(seconds=30):\n self._basic_login()\n\n return {'Accept': HEADER_ACCEPT,\n 'Authorization': 'bearer ' + self.access_token}\n",
"def _get_location(self, location):\n if location is None:\n return self.installation_info[0]['locationInfo']['locationId']\n return location\n"
] |
class EvohomeClient(object): # pylint: disable=too-many-instance-attributes,useless-object-inheritance
"""Provides access to the v2 Evohome API."""
def __init__(self, username, password, debug=False, refresh_token=None, # pylint: disable=too-many-arguments
access_token=None, access_token_expires=None):
"""Construct the EvohomeClient object."""
if debug is True:
_LOGGER.setLevel(logging.DEBUG)
_LOGGER.debug("Debug mode is explicitly enabled.")
requests_logger = logging.getLogger("requests.packages.urllib3")
requests_logger.setLevel(logging.DEBUG)
requests_logger.propagate = True
http_client.HTTPConnection.debuglevel = 1
else:
_LOGGER.debug(
"Debug mode is not explicitly enabled "
"(but may be enabled elsewhere)."
)
self.username = username
self.password = password
self.refresh_token = refresh_token
self.access_token = access_token
self.access_token_expires = access_token_expires
self.account_info = None
self.locations = None
self.installation_info = None
self.system_id = None
self._login()
def _login(self):
self.user_account()
self.installation()
def _headers(self):
"""Ensure the Authorization Header has a valid Access Token."""
if not self.access_token or not self.access_token_expires:
self._basic_login()
elif datetime.now() > self.access_token_expires - timedelta(seconds=30):
self._basic_login()
return {'Accept': HEADER_ACCEPT,
'Authorization': 'bearer ' + self.access_token}
def _basic_login(self):
"""Obtain a new access token from the vendor.
First, try using the refresh_token, if one is available, otherwise
authenticate using the user credentials.
"""
_LOGGER.debug("No/Expired/Invalid access_token, re-authenticating...")
self.access_token = self.access_token_expires = None
if self.refresh_token:
_LOGGER.debug("Trying refresh_token...")
credentials = {'grant_type': "refresh_token",
'scope': "EMEA-V1-Basic EMEA-V1-Anonymous",
'refresh_token': self.refresh_token}
try:
self._obtain_access_token(credentials)
except (requests.HTTPError, KeyError, ValueError):
_LOGGER.warning(
"Invalid refresh_token, will try user credentials.")
self.refresh_token = None
if not self.refresh_token:
_LOGGER.debug("Trying user credentials...")
credentials = {'grant_type': "password",
'scope': "EMEA-V1-Basic EMEA-V1-Anonymous "
"EMEA-V1-Get-Current-User-Account",
'Username': self.username,
'Password': self.password}
self._obtain_access_token(credentials)
_LOGGER.debug("refresh_token = %s", self.refresh_token)
_LOGGER.debug("access_token = %s", self.access_token)
_LOGGER.debug("access_token_expires = %s",
self.access_token_expires.strftime("%Y-%m-%d %H:%M:%S"))
def _obtain_access_token(self, credentials):
url = 'https://tccna.honeywell.com/Auth/OAuth/Token'
payload = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
'Host': 'rs.alarmnet.com/',
'Cache-Control': 'no-store no-cache',
'Pragma': 'no-cache',
'Connection': 'Keep-Alive'
}
payload.update(credentials) # merge the credentials into the payload
response = requests.post(url, data=payload, headers=HEADER_BASIC_AUTH)
try:
response.raise_for_status()
except requests.HTTPError:
msg = "Unable to obtain an Access Token"
if response.text: # if there is a message, then raise with it
msg = msg + ", hint: " + response.text
raise AuthenticationError(msg)
try: # the access token _should_ be valid...
# this may cause a ValueError
response_json = response.json()
# these may cause a KeyError
self.access_token = response_json['access_token']
self.access_token_expires = (
datetime.now() +
timedelta(seconds=response_json['expires_in'])
)
self.refresh_token = response_json['refresh_token']
except KeyError:
raise AuthenticationError("Unable to obtain an Access Token, "
"hint: " + response_json)
except ValueError:
raise AuthenticationError("Unable to obtain an Access Token, "
"hint: " + response.text)
def _get_location(self, location):
if location is None:
return self.installation_info[0]['locationInfo']['locationId']
return location
def _get_single_heating_system(self):
# This allows a shortcut for some systems
if len(self.locations) != 1:
raise Exception("More (or less) than one location available")
if len(self.locations[0]._gateways) != 1: # pylint: disable=protected-access
raise Exception("More (or less) than one gateway available")
if len(self.locations[0]._gateways[0]._control_systems) != 1: # pylint: disable=protected-access
raise Exception("More (or less) than one control system available")
return self.locations[0]._gateways[0]._control_systems[0] # pylint: disable=protected-access
def user_account(self):
"""Return the user account information."""
self.account_info = None
url = 'https://tccna.honeywell.com/WebAPI/emea/api/v1/userAccount'
response = requests.get(url, headers=self._headers())
response.raise_for_status()
self.account_info = response.json()
return self.account_info
def installation(self):
"""Return the details of the installation."""
self.locations = []
url = ("https://tccna.honeywell.com/WebAPI/emea/api/v1/location"
"/installationInfo?userId=%s"
"&includeTemperatureControlSystems=True"
% self.account_info['userId'])
response = requests.get(url, headers=self._headers())
response.raise_for_status()
self.installation_info = response.json()
self.system_id = (self.installation_info[0]['gateways'][0]
['temperatureControlSystems'][0]['systemId'])
for loc_data in self.installation_info:
self.locations.append(Location(self, loc_data))
return self.installation_info
def gateway(self):
"""Return the detail of the gateway."""
url = 'https://tccna.honeywell.com/WebAPI/emea/api/v1/gateway'
response = requests.get(url, headers=self._headers())
response.raise_for_status()
return response.json()
def set_status_normal(self):
"""Set the system into normal heating mode."""
return self._get_single_heating_system().set_status_normal()
def set_status_reset(self):
"""Reset the system mode."""
return self._get_single_heating_system().set_status_reset()
def set_status_custom(self, until=None):
"""Set the system into custom heating mode."""
return self._get_single_heating_system().set_status_custom(until)
def set_status_eco(self, until=None):
"""Set the system into eco heating mode."""
return self._get_single_heating_system().set_status_eco(until)
def set_status_away(self, until=None):
"""Set the system into away heating mode."""
return self._get_single_heating_system().set_status_away(until)
def set_status_dayoff(self, until=None):
"""Set the system into day off heating mode."""
return self._get_single_heating_system().set_status_dayoff(until)
def set_status_heatingoff(self, until=None):
"""Set the system into heating off heating mode."""
return self._get_single_heating_system().set_status_heatingoff(until)
def temperatures(self):
"""Return the current zone temperatures and set points."""
return self._get_single_heating_system().temperatures()
def zone_schedules_backup(self, filename):
"""Back up the current system configuration to the given file."""
return self._get_single_heating_system().zone_schedules_backup(filename)
def zone_schedules_restore(self, filename):
"""Restore the current system configuration from the given file."""
return self._get_single_heating_system().zone_schedules_restore(filename)
|
watchforstock/evohome-client
|
evohomeclient2/__init__.py
|
EvohomeClient.gateway
|
python
|
def gateway(self):
url = 'https://tccna.honeywell.com/WebAPI/emea/api/v1/gateway'
response = requests.get(url, headers=self._headers())
response.raise_for_status()
return response.json()
|
Return the detail of the gateway.
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/__init__.py#L236-L243
|
[
"def _headers(self):\n \"\"\"Ensure the Authorization Header has a valid Access Token.\"\"\"\n if not self.access_token or not self.access_token_expires:\n self._basic_login()\n\n elif datetime.now() > self.access_token_expires - timedelta(seconds=30):\n self._basic_login()\n\n return {'Accept': HEADER_ACCEPT,\n 'Authorization': 'bearer ' + self.access_token}\n"
] |
class EvohomeClient(object): # pylint: disable=too-many-instance-attributes,useless-object-inheritance
"""Provides access to the v2 Evohome API."""
def __init__(self, username, password, debug=False, refresh_token=None, # pylint: disable=too-many-arguments
access_token=None, access_token_expires=None):
"""Construct the EvohomeClient object."""
if debug is True:
_LOGGER.setLevel(logging.DEBUG)
_LOGGER.debug("Debug mode is explicitly enabled.")
requests_logger = logging.getLogger("requests.packages.urllib3")
requests_logger.setLevel(logging.DEBUG)
requests_logger.propagate = True
http_client.HTTPConnection.debuglevel = 1
else:
_LOGGER.debug(
"Debug mode is not explicitly enabled "
"(but may be enabled elsewhere)."
)
self.username = username
self.password = password
self.refresh_token = refresh_token
self.access_token = access_token
self.access_token_expires = access_token_expires
self.account_info = None
self.locations = None
self.installation_info = None
self.system_id = None
self._login()
def _login(self):
self.user_account()
self.installation()
def _headers(self):
"""Ensure the Authorization Header has a valid Access Token."""
if not self.access_token or not self.access_token_expires:
self._basic_login()
elif datetime.now() > self.access_token_expires - timedelta(seconds=30):
self._basic_login()
return {'Accept': HEADER_ACCEPT,
'Authorization': 'bearer ' + self.access_token}
def _basic_login(self):
"""Obtain a new access token from the vendor.
First, try using the refresh_token, if one is available, otherwise
authenticate using the user credentials.
"""
_LOGGER.debug("No/Expired/Invalid access_token, re-authenticating...")
self.access_token = self.access_token_expires = None
if self.refresh_token:
_LOGGER.debug("Trying refresh_token...")
credentials = {'grant_type': "refresh_token",
'scope': "EMEA-V1-Basic EMEA-V1-Anonymous",
'refresh_token': self.refresh_token}
try:
self._obtain_access_token(credentials)
except (requests.HTTPError, KeyError, ValueError):
_LOGGER.warning(
"Invalid refresh_token, will try user credentials.")
self.refresh_token = None
if not self.refresh_token:
_LOGGER.debug("Trying user credentials...")
credentials = {'grant_type': "password",
'scope': "EMEA-V1-Basic EMEA-V1-Anonymous "
"EMEA-V1-Get-Current-User-Account",
'Username': self.username,
'Password': self.password}
self._obtain_access_token(credentials)
_LOGGER.debug("refresh_token = %s", self.refresh_token)
_LOGGER.debug("access_token = %s", self.access_token)
_LOGGER.debug("access_token_expires = %s",
self.access_token_expires.strftime("%Y-%m-%d %H:%M:%S"))
def _obtain_access_token(self, credentials):
url = 'https://tccna.honeywell.com/Auth/OAuth/Token'
payload = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
'Host': 'rs.alarmnet.com/',
'Cache-Control': 'no-store no-cache',
'Pragma': 'no-cache',
'Connection': 'Keep-Alive'
}
payload.update(credentials) # merge the credentials into the payload
response = requests.post(url, data=payload, headers=HEADER_BASIC_AUTH)
try:
response.raise_for_status()
except requests.HTTPError:
msg = "Unable to obtain an Access Token"
if response.text: # if there is a message, then raise with it
msg = msg + ", hint: " + response.text
raise AuthenticationError(msg)
try: # the access token _should_ be valid...
# this may cause a ValueError
response_json = response.json()
# these may cause a KeyError
self.access_token = response_json['access_token']
self.access_token_expires = (
datetime.now() +
timedelta(seconds=response_json['expires_in'])
)
self.refresh_token = response_json['refresh_token']
except KeyError:
raise AuthenticationError("Unable to obtain an Access Token, "
"hint: " + response_json)
except ValueError:
raise AuthenticationError("Unable to obtain an Access Token, "
"hint: " + response.text)
def _get_location(self, location):
if location is None:
return self.installation_info[0]['locationInfo']['locationId']
return location
def _get_single_heating_system(self):
# This allows a shortcut for some systems
if len(self.locations) != 1:
raise Exception("More (or less) than one location available")
if len(self.locations[0]._gateways) != 1: # pylint: disable=protected-access
raise Exception("More (or less) than one gateway available")
if len(self.locations[0]._gateways[0]._control_systems) != 1: # pylint: disable=protected-access
raise Exception("More (or less) than one control system available")
return self.locations[0]._gateways[0]._control_systems[0] # pylint: disable=protected-access
def user_account(self):
"""Return the user account information."""
self.account_info = None
url = 'https://tccna.honeywell.com/WebAPI/emea/api/v1/userAccount'
response = requests.get(url, headers=self._headers())
response.raise_for_status()
self.account_info = response.json()
return self.account_info
def installation(self):
"""Return the details of the installation."""
self.locations = []
url = ("https://tccna.honeywell.com/WebAPI/emea/api/v1/location"
"/installationInfo?userId=%s"
"&includeTemperatureControlSystems=True"
% self.account_info['userId'])
response = requests.get(url, headers=self._headers())
response.raise_for_status()
self.installation_info = response.json()
self.system_id = (self.installation_info[0]['gateways'][0]
['temperatureControlSystems'][0]['systemId'])
for loc_data in self.installation_info:
self.locations.append(Location(self, loc_data))
return self.installation_info
def full_installation(self, location=None):
"""Return the full details of the installation."""
url = ("https://tccna.honeywell.com/WebAPI/emea/api/v1/location"
"/%s/installationInfo?includeTemperatureControlSystems=True"
% self._get_location(location))
response = requests.get(url, headers=self._headers())
response.raise_for_status()
return response.json()
def set_status_normal(self):
"""Set the system into normal heating mode."""
return self._get_single_heating_system().set_status_normal()
def set_status_reset(self):
"""Reset the system mode."""
return self._get_single_heating_system().set_status_reset()
def set_status_custom(self, until=None):
"""Set the system into custom heating mode."""
return self._get_single_heating_system().set_status_custom(until)
def set_status_eco(self, until=None):
"""Set the system into eco heating mode."""
return self._get_single_heating_system().set_status_eco(until)
def set_status_away(self, until=None):
"""Set the system into away heating mode."""
return self._get_single_heating_system().set_status_away(until)
def set_status_dayoff(self, until=None):
"""Set the system into day off heating mode."""
return self._get_single_heating_system().set_status_dayoff(until)
def set_status_heatingoff(self, until=None):
"""Set the system into heating off heating mode."""
return self._get_single_heating_system().set_status_heatingoff(until)
def temperatures(self):
"""Return the current zone temperatures and set points."""
return self._get_single_heating_system().temperatures()
def zone_schedules_backup(self, filename):
"""Back up the current system configuration to the given file."""
return self._get_single_heating_system().zone_schedules_backup(filename)
def zone_schedules_restore(self, filename):
"""Restore the current system configuration from the given file."""
return self._get_single_heating_system().zone_schedules_restore(filename)
|
watchforstock/evohome-client
|
evohomeclient2/controlsystem.py
|
ControlSystem.temperatures
|
python
|
def temperatures(self):
self.location.status()
if self.hotwater:
yield {
'thermostat': 'DOMESTIC_HOT_WATER',
'id': self.hotwater.dhwId,
'name': '',
'temp': self.hotwater.temperatureStatus['temperature'], # pylint: disable=no-member
'setpoint': ''
}
for zone in self._zones:
zone_info = {
'thermostat': 'EMEA_ZONE',
'id': zone.zoneId,
'name': zone.name,
'temp': None,
'setpoint': zone.setpointStatus['targetHeatTemperature']
}
if zone.temperatureStatus['isAvailable']:
zone_info['temp'] = zone.temperatureStatus['temperature']
yield zone_info
|
Return a generator with the details of each zone.
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/controlsystem.py#L92-L116
| null |
class ControlSystem(object): # pylint: disable=useless-object-inheritance, too-many-instance-attributes
"""Provides handling of a control system."""
def __init__(self, client, location, gateway, data=None):
self.client = client
self.location = location
self.gateway = gateway
self._zones = []
self.zones = {}
self.zones_by_id = {}
self.hotwater = None
self.systemId = None # pylint: disable=invalid-name
if data is not None:
local_data = dict(data)
del local_data['zones']
self.__dict__.update(local_data)
for z_data in data['zones']:
zone = Zone(client, z_data)
self._zones.append(zone)
self.zones[zone.name] = zone
self.zones_by_id[zone.zoneId] = zone
if 'dhw' in data:
self.hotwater = HotWater(client, data['dhw'])
def _set_status(self, mode, until=None):
headers = dict(self.client._headers()) # pylint: disable=protected-access
headers['Content-Type'] = 'application/json'
if until is None:
data = {"SystemMode": mode, "TimeUntil": None, "Permanent": True}
else:
data = {
"SystemMode": mode,
"TimeUntil": "%sT00:00:00Z" % until.strftime('%Y-%m-%d'),
"Permanent": False
}
response = requests.put(
"https://tccna.honeywell.com/WebAPI/emea/api/v1"
"/temperatureControlSystem/%s/mode" % self.systemId,
data=json.dumps(data), headers=headers
)
response.raise_for_status()
def set_status_normal(self):
"""Set the system into normal mode."""
self._set_status("Auto")
def set_status_reset(self):
"""Reset the system into normal mode.
This will also set all the zones to FollowSchedule mode.
"""
self._set_status("AutoWithReset")
def set_status_custom(self, until=None):
"""Set the system into custom mode."""
self._set_status("Custom", until)
def set_status_eco(self, until=None):
"""Set the system into eco mode."""
self._set_status("AutoWithEco", until)
def set_status_away(self, until=None):
"""Set the system into away mode."""
self._set_status("Away", until)
def set_status_dayoff(self, until=None):
"""Set the system into dayoff mode."""
self._set_status("DayOff", until)
def set_status_heatingoff(self, until=None):
"""Set the system into heating off mode."""
self._set_status("HeatingOff", until)
def zone_schedules_backup(self, filename):
"""Backup all zones on control system to the given file."""
_LOGGER.info("Backing up schedules from ControlSystem: %s (%s)...",
self.systemId, self.location.name)
schedules = {}
if self.hotwater:
_LOGGER.info("Retrieving DHW schedule: %s...",
self.hotwater.zoneId)
schedule = self.hotwater.schedule()
schedules[self.hotwater.zoneId] = {
'name': 'Domestic Hot Water',
'schedule': schedule}
for zone in self._zones:
zone_id = zone.zoneId
name = zone.name
_LOGGER.info("Retrieving Zone schedule: %s - %s", zone_id, name)
schedule = zone.schedule()
schedules[zone_id] = {'name': name, 'schedule': schedule}
schedule_db = json.dumps(schedules, indent=4)
_LOGGER.info("Writing to backup file: %s...", filename)
with open(filename, 'w') as file_output:
file_output.write(schedule_db)
_LOGGER.info("Backup completed.")
def zone_schedules_restore(self, filename):
"""Restore all zones on control system from the given file."""
_LOGGER.info("Restoring schedules to ControlSystem %s (%s)...",
self.systemId, self.location)
_LOGGER.info("Reading from backup file: %s...", filename)
with open(filename, 'r') as file_input:
schedule_db = file_input.read()
schedules = json.loads(schedule_db)
for zone_id, zone_schedule in schedules.items():
name = zone_schedule['name']
zone_info = zone_schedule['schedule']
_LOGGER.info("Restoring schedule for: %s - %s...",
zone_id, name)
if self.hotwater and self.hotwater.zoneId == zone_id:
self.hotwater.set_schedule(json.dumps(zone_info))
else:
self.zones_by_id[zone_id].set_schedule(
json.dumps(zone_info))
_LOGGER.info("Restore completed.")
|
watchforstock/evohome-client
|
evohomeclient2/controlsystem.py
|
ControlSystem.zone_schedules_backup
|
python
|
def zone_schedules_backup(self, filename):
_LOGGER.info("Backing up schedules from ControlSystem: %s (%s)...",
self.systemId, self.location.name)
schedules = {}
if self.hotwater:
_LOGGER.info("Retrieving DHW schedule: %s...",
self.hotwater.zoneId)
schedule = self.hotwater.schedule()
schedules[self.hotwater.zoneId] = {
'name': 'Domestic Hot Water',
'schedule': schedule}
for zone in self._zones:
zone_id = zone.zoneId
name = zone.name
_LOGGER.info("Retrieving Zone schedule: %s - %s", zone_id, name)
schedule = zone.schedule()
schedules[zone_id] = {'name': name, 'schedule': schedule}
schedule_db = json.dumps(schedules, indent=4)
_LOGGER.info("Writing to backup file: %s...", filename)
with open(filename, 'w') as file_output:
file_output.write(schedule_db)
_LOGGER.info("Backup completed.")
|
Backup all zones on control system to the given file.
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/controlsystem.py#L118-L149
| null |
class ControlSystem(object): # pylint: disable=useless-object-inheritance, too-many-instance-attributes
"""Provides handling of a control system."""
def __init__(self, client, location, gateway, data=None):
self.client = client
self.location = location
self.gateway = gateway
self._zones = []
self.zones = {}
self.zones_by_id = {}
self.hotwater = None
self.systemId = None # pylint: disable=invalid-name
if data is not None:
local_data = dict(data)
del local_data['zones']
self.__dict__.update(local_data)
for z_data in data['zones']:
zone = Zone(client, z_data)
self._zones.append(zone)
self.zones[zone.name] = zone
self.zones_by_id[zone.zoneId] = zone
if 'dhw' in data:
self.hotwater = HotWater(client, data['dhw'])
def _set_status(self, mode, until=None):
headers = dict(self.client._headers()) # pylint: disable=protected-access
headers['Content-Type'] = 'application/json'
if until is None:
data = {"SystemMode": mode, "TimeUntil": None, "Permanent": True}
else:
data = {
"SystemMode": mode,
"TimeUntil": "%sT00:00:00Z" % until.strftime('%Y-%m-%d'),
"Permanent": False
}
response = requests.put(
"https://tccna.honeywell.com/WebAPI/emea/api/v1"
"/temperatureControlSystem/%s/mode" % self.systemId,
data=json.dumps(data), headers=headers
)
response.raise_for_status()
def set_status_normal(self):
"""Set the system into normal mode."""
self._set_status("Auto")
def set_status_reset(self):
"""Reset the system into normal mode.
This will also set all the zones to FollowSchedule mode.
"""
self._set_status("AutoWithReset")
def set_status_custom(self, until=None):
"""Set the system into custom mode."""
self._set_status("Custom", until)
def set_status_eco(self, until=None):
"""Set the system into eco mode."""
self._set_status("AutoWithEco", until)
def set_status_away(self, until=None):
"""Set the system into away mode."""
self._set_status("Away", until)
def set_status_dayoff(self, until=None):
"""Set the system into dayoff mode."""
self._set_status("DayOff", until)
def set_status_heatingoff(self, until=None):
"""Set the system into heating off mode."""
self._set_status("HeatingOff", until)
def temperatures(self):
"""Return a generator with the details of each zone."""
self.location.status()
if self.hotwater:
yield {
'thermostat': 'DOMESTIC_HOT_WATER',
'id': self.hotwater.dhwId,
'name': '',
'temp': self.hotwater.temperatureStatus['temperature'], # pylint: disable=no-member
'setpoint': ''
}
for zone in self._zones:
zone_info = {
'thermostat': 'EMEA_ZONE',
'id': zone.zoneId,
'name': zone.name,
'temp': None,
'setpoint': zone.setpointStatus['targetHeatTemperature']
}
if zone.temperatureStatus['isAvailable']:
zone_info['temp'] = zone.temperatureStatus['temperature']
yield zone_info
def zone_schedules_restore(self, filename):
"""Restore all zones on control system from the given file."""
_LOGGER.info("Restoring schedules to ControlSystem %s (%s)...",
self.systemId, self.location)
_LOGGER.info("Reading from backup file: %s...", filename)
with open(filename, 'r') as file_input:
schedule_db = file_input.read()
schedules = json.loads(schedule_db)
for zone_id, zone_schedule in schedules.items():
name = zone_schedule['name']
zone_info = zone_schedule['schedule']
_LOGGER.info("Restoring schedule for: %s - %s...",
zone_id, name)
if self.hotwater and self.hotwater.zoneId == zone_id:
self.hotwater.set_schedule(json.dumps(zone_info))
else:
self.zones_by_id[zone_id].set_schedule(
json.dumps(zone_info))
_LOGGER.info("Restore completed.")
|
watchforstock/evohome-client
|
evohomeclient2/controlsystem.py
|
ControlSystem.zone_schedules_restore
|
python
|
def zone_schedules_restore(self, filename):
_LOGGER.info("Restoring schedules to ControlSystem %s (%s)...",
self.systemId, self.location)
_LOGGER.info("Reading from backup file: %s...", filename)
with open(filename, 'r') as file_input:
schedule_db = file_input.read()
schedules = json.loads(schedule_db)
for zone_id, zone_schedule in schedules.items():
name = zone_schedule['name']
zone_info = zone_schedule['schedule']
_LOGGER.info("Restoring schedule for: %s - %s...",
zone_id, name)
if self.hotwater and self.hotwater.zoneId == zone_id:
self.hotwater.set_schedule(json.dumps(zone_info))
else:
self.zones_by_id[zone_id].set_schedule(
json.dumps(zone_info))
_LOGGER.info("Restore completed.")
|
Restore all zones on control system from the given file.
|
train
|
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/controlsystem.py#L151-L174
| null |
class ControlSystem(object): # pylint: disable=useless-object-inheritance, too-many-instance-attributes
"""Provides handling of a control system."""
def __init__(self, client, location, gateway, data=None):
self.client = client
self.location = location
self.gateway = gateway
self._zones = []
self.zones = {}
self.zones_by_id = {}
self.hotwater = None
self.systemId = None # pylint: disable=invalid-name
if data is not None:
local_data = dict(data)
del local_data['zones']
self.__dict__.update(local_data)
for z_data in data['zones']:
zone = Zone(client, z_data)
self._zones.append(zone)
self.zones[zone.name] = zone
self.zones_by_id[zone.zoneId] = zone
if 'dhw' in data:
self.hotwater = HotWater(client, data['dhw'])
def _set_status(self, mode, until=None):
headers = dict(self.client._headers()) # pylint: disable=protected-access
headers['Content-Type'] = 'application/json'
if until is None:
data = {"SystemMode": mode, "TimeUntil": None, "Permanent": True}
else:
data = {
"SystemMode": mode,
"TimeUntil": "%sT00:00:00Z" % until.strftime('%Y-%m-%d'),
"Permanent": False
}
response = requests.put(
"https://tccna.honeywell.com/WebAPI/emea/api/v1"
"/temperatureControlSystem/%s/mode" % self.systemId,
data=json.dumps(data), headers=headers
)
response.raise_for_status()
def set_status_normal(self):
"""Set the system into normal mode."""
self._set_status("Auto")
def set_status_reset(self):
"""Reset the system into normal mode.
This will also set all the zones to FollowSchedule mode.
"""
self._set_status("AutoWithReset")
def set_status_custom(self, until=None):
"""Set the system into custom mode."""
self._set_status("Custom", until)
def set_status_eco(self, until=None):
"""Set the system into eco mode."""
self._set_status("AutoWithEco", until)
def set_status_away(self, until=None):
"""Set the system into away mode."""
self._set_status("Away", until)
def set_status_dayoff(self, until=None):
"""Set the system into dayoff mode."""
self._set_status("DayOff", until)
def set_status_heatingoff(self, until=None):
"""Set the system into heating off mode."""
self._set_status("HeatingOff", until)
def temperatures(self):
"""Return a generator with the details of each zone."""
self.location.status()
if self.hotwater:
yield {
'thermostat': 'DOMESTIC_HOT_WATER',
'id': self.hotwater.dhwId,
'name': '',
'temp': self.hotwater.temperatureStatus['temperature'], # pylint: disable=no-member
'setpoint': ''
}
for zone in self._zones:
zone_info = {
'thermostat': 'EMEA_ZONE',
'id': zone.zoneId,
'name': zone.name,
'temp': None,
'setpoint': zone.setpointStatus['targetHeatTemperature']
}
if zone.temperatureStatus['isAvailable']:
zone_info['temp'] = zone.temperatureStatus['temperature']
yield zone_info
def zone_schedules_backup(self, filename):
"""Backup all zones on control system to the given file."""
_LOGGER.info("Backing up schedules from ControlSystem: %s (%s)...",
self.systemId, self.location.name)
schedules = {}
if self.hotwater:
_LOGGER.info("Retrieving DHW schedule: %s...",
self.hotwater.zoneId)
schedule = self.hotwater.schedule()
schedules[self.hotwater.zoneId] = {
'name': 'Domestic Hot Water',
'schedule': schedule}
for zone in self._zones:
zone_id = zone.zoneId
name = zone.name
_LOGGER.info("Retrieving Zone schedule: %s - %s", zone_id, name)
schedule = zone.schedule()
schedules[zone_id] = {'name': name, 'schedule': schedule}
schedule_db = json.dumps(schedules, indent=4)
_LOGGER.info("Writing to backup file: %s...", filename)
with open(filename, 'w') as file_output:
file_output.write(schedule_db)
_LOGGER.info("Backup completed.")
|
gtalarico/airtable-python-wrapper
|
airtable/params.py
|
_BaseObjectArrayParam.to_param_dict
|
python
|
def to_param_dict(self):
""" Sorts to ensure Order is consistent for Testing """
param_dict = {}
for index, dictionary in enumerate(self.value):
for key, value in dictionary.items():
param_name = '{param_name}[{index}][{key}]'.format(
param_name=self.param_name,
index=index,
key=key)
param_dict[param_name] = value
return OrderedDict(sorted(param_dict.items()))
|
Sorts to ensure Order is consistent for Testing
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/params.py#L68-L78
| null |
class _BaseObjectArrayParam(_BaseParam):
"""
Api Expects Array of Objects:
>>> [{field: "UUID", direction: "desc"}, {...}]
Requests Params Input:
>>> params={'sort': ['FieldOne', '-FieldTwo']}
or
>>> params={'sort': [('FieldOne', 'asc'), ('-FieldTwo', 'desc')]}
Requests Url Params Encoding:
>>> ?sort=field&sort=direction&sort=field&sort=direction
Expected Url Params:
>>> ?sort[0][field]=FieldOne&sort[0][direction]=asc
"""
def to_param_dict(self):
""" Sorts to ensure Order is consistent for Testing """
param_dict = {}
for index, dictionary in enumerate(self.value):
for key, value in dictionary.items():
param_name = '{param_name}[{index}][{key}]'.format(
param_name=self.param_name,
index=index,
key=key)
param_dict[param_name] = value
return OrderedDict(sorted(param_dict.items()))
|
gtalarico/airtable-python-wrapper
|
airtable/params.py
|
AirtableParams._discover_params
|
python
|
def _discover_params(cls):
"""
Returns a dict where filter keyword is key, and class is value.
To handle param alias (maxRecords or max_records), both versions are
added.
"""
try:
return cls.filters
except AttributeError:
filters = {}
for param_class_name in dir(cls):
param_class = getattr(cls, param_class_name)
if hasattr(param_class, 'kwarg'):
filters[param_class.kwarg] = param_class
filters[param_class.param_name] = param_class
cls.filters = filters
return cls.filters
|
Returns a dict where filter keyword is key, and class is value.
To handle param alias (maxRecords or max_records), both versions are
added.
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/params.py#L342-L359
| null |
class AirtableParams():
class MaxRecordsParam(_BaseParam):
"""
Max Records Param
Kwargs:
``max_records=`` or ``maxRecords=``
The maximum total number of records that will be returned.
Usage:
>>> airtable.get_all(max_records=10)
Args:
max_records (``int``): The maximum total number of records that
will be returned.
"""
# Class Input > Output
# >>> filter = MaxRecordsParam(100)
# >>> filter.to_param_dict()
# {'maxRecords: 100}
param_name = 'maxRecords'
kwarg = 'max_records'
class ViewParam(_BaseParam):
"""
View Param
Kwargs:
``view=``
If set, only the records in that view will be returned.
The records will be sorted according to the order of the view.
Usage:
>>> airtable.get_all(view='My View')
Args:
view (``str``): The name or ID of a view.
"""
# Class Input > Output
# >>> filter = ViewParam('Name or Id Of View')
# >>> filter.to_param_dict()
# {'view: 'Name or Id Of View'}
param_name = 'view'
kwarg = param_name
class PageSizeParam(_BaseParam):
"""
Page Size Param
Kwargs:
``page_size=`` or ``pageSize=``
Limits the maximum number of records returned in each request.
Default is 100.
Usage:
>>> airtable.get_all(page_size=50)
Args:
page_size (``int``): The number of records returned in each request.
Must be less than or equal to 100. Default is 100.
"""
# Class Input > Output
# >>> filter = PageSizeParam(50)
# >>> filter.to_param_dict()
# {'pageSize: 50}
param_name = 'pageSize'
kwarg = 'page_size'
class FormulaParam(_BaseParam):
"""
Formula Param
Kwargs:
``formula=`` or ``filterByFormula=``
The formula will be evaluated for each record, and if the result
is not 0, false, "", NaN, [], or #Error! the record will be included
in the response.
If combined with view, only records in that view which satisfy the
formula will be returned. For example, to only include records where
``COLUMN_A`` isn't empty, pass in: ``"NOT({COLUMN_A}='')"``
For more information see
`Airtable Docs on formulas. <https://airtable.com/api>`_
Usage - Text Column is not empty:
>>> airtable.get_all(formula="NOT({COLUMN_A}='')")
Usage - Text Column contains:
>>> airtable.get_all(formula="FIND('SomeSubText', {COLUMN_STR})=1")
Args:
formula (``str``): A valid Airtable formula.
"""
# Class Input > Output
# >>> param = FormulaParams("FIND('DUP', {COLUMN_STR})=1")
# >>> param.to_param_dict()
# {'formula': "FIND('WW')=1"}
param_name = 'filterByFormula'
kwarg = 'formula'
@staticmethod
def from_name_and_value(field_name, field_value):
"""
Creates a formula to match cells from from field_name and value
"""
if isinstance(field_value, str):
field_value = "'{}'".format(field_value)
formula = "{{{name}}}={value}".format(name=field_name,
value=field_value)
return formula
class _OffsetParam(_BaseParam):
"""
Offset Param
Kwargs:
``offset=``
If there are more records what was in the response,
the response body will contain an offset value.
To fetch the next page of records,
include offset in the next request's parameters.
This is used internally by :any:`get_all` and :any:`get_iter`.
Usage:
>>> airtable.get_iter(offset='recjAle5lryYOpMKk')
Args:
record_id (``str``, ``list``):
"""
# Class Input > Output
# >>> filter = _OffsetParam('recqgqThAnETLuH58')
# >>> filter.to_param_dict()
# {'offset: 'recqgqThAnETLuH58'}
param_name = 'offset'
kwarg = param_name
class FieldsParam(_BaseStringArrayParam):
"""
Fields Param
Kwargs:
``fields=``
Only data for fields whose names are in this list will be included in
the records. If you don't need every field, you can use this parameter
to reduce the amount of data transferred.
Usage:
>>> airtable.get(fields='ColumnA')
Multiple Columns:
>>> airtable.get(fields=['ColumnA', 'ColumnB'])
Args:
fields (``str``, ``list``): Name of columns you want to retrieve.
"""
# Class Input > Output
# >>> param = FieldsParam(['FieldOne', 'FieldTwo'])
# >>> param.to_param_dict()
# {'fields[]': ['FieldOne', 'FieldTwo']}
param_name = 'fields'
kwarg = param_name
class SortParam(_BaseObjectArrayParam):
"""
Sort Param
Kwargs:
``sort=``
Specifies how the records will be ordered. If you set the view
parameter, the returned records in that view will be sorted by these
fields.
If sorting by multiple columns, column names can be passed as a list.
Sorting Direction is ascending by default, but can be reversed by
prefixing the column name with a minus sign ``-``, or passing
``COLUMN_NAME, DIRECTION`` tuples. Direction options
are ``asc`` and ``desc``.
Usage:
>>> airtable.get(sort='ColumnA')
Multiple Columns:
>>> airtable.get(sort=['ColumnA', '-ColumnB'])
Explicit Directions:
>>> airtable.get(sort=[('ColumnA', 'asc'), ('ColumnB', 'desc')])
Args:
fields (``str``, ``list``): Name of columns and directions.
"""
# Class Input > Output
# >>> filter = SortParam([{'field': 'col', 'direction': 'asc'}])
# >>> filter.to_param_dict()
# {'sort[0]['field']: 'col', sort[0]['direction']: 'asc'}
param_name = 'sort'
kwarg = param_name
def __init__(self, value):
# Wraps string into list to avoid string iteration
if hasattr(value, 'startswith'):
value = [value]
self.value = []
direction = 'asc'
for item in value:
if not hasattr(item, 'startswith'):
field_name, direction = item
else:
if item.startswith('-'):
direction = 'desc'
field_name = item[1:]
else:
field_name = item
sort_param = {'field': field_name, 'direction': direction}
self.value.append(sort_param)
@classmethod
def _discover_params(cls):
"""
Returns a dict where filter keyword is key, and class is value.
To handle param alias (maxRecords or max_records), both versions are
added.
"""
try:
return cls.filters
except AttributeError:
filters = {}
for param_class_name in dir(cls):
param_class = getattr(cls, param_class_name)
if hasattr(param_class, 'kwarg'):
filters[param_class.kwarg] = param_class
filters[param_class.param_name] = param_class
cls.filters = filters
return cls.filters
@classmethod
def _get(cls, kwarg_name):
""" Returns a Param Class Instance, by its kwarg or param name """
param_classes = cls._discover_params()
try:
param_class = param_classes[kwarg_name]
except KeyError:
raise ValueError('invalid param keyword {}'.format(kwarg_name))
else:
return param_class
|
gtalarico/airtable-python-wrapper
|
airtable/params.py
|
AirtableParams._get
|
python
|
def _get(cls, kwarg_name):
""" Returns a Param Class Instance, by its kwarg or param name """
param_classes = cls._discover_params()
try:
param_class = param_classes[kwarg_name]
except KeyError:
raise ValueError('invalid param keyword {}'.format(kwarg_name))
else:
return param_class
|
Returns a Param Class Instance, by its kwarg or param name
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/params.py#L362-L370
| null |
class AirtableParams():
class MaxRecordsParam(_BaseParam):
"""
Max Records Param
Kwargs:
``max_records=`` or ``maxRecords=``
The maximum total number of records that will be returned.
Usage:
>>> airtable.get_all(max_records=10)
Args:
max_records (``int``): The maximum total number of records that
will be returned.
"""
# Class Input > Output
# >>> filter = MaxRecordsParam(100)
# >>> filter.to_param_dict()
# {'maxRecords: 100}
param_name = 'maxRecords'
kwarg = 'max_records'
class ViewParam(_BaseParam):
"""
View Param
Kwargs:
``view=``
If set, only the records in that view will be returned.
The records will be sorted according to the order of the view.
Usage:
>>> airtable.get_all(view='My View')
Args:
view (``str``): The name or ID of a view.
"""
# Class Input > Output
# >>> filter = ViewParam('Name or Id Of View')
# >>> filter.to_param_dict()
# {'view: 'Name or Id Of View'}
param_name = 'view'
kwarg = param_name
class PageSizeParam(_BaseParam):
"""
Page Size Param
Kwargs:
``page_size=`` or ``pageSize=``
Limits the maximum number of records returned in each request.
Default is 100.
Usage:
>>> airtable.get_all(page_size=50)
Args:
page_size (``int``): The number of records returned in each request.
Must be less than or equal to 100. Default is 100.
"""
# Class Input > Output
# >>> filter = PageSizeParam(50)
# >>> filter.to_param_dict()
# {'pageSize: 50}
param_name = 'pageSize'
kwarg = 'page_size'
class FormulaParam(_BaseParam):
"""
Formula Param
Kwargs:
``formula=`` or ``filterByFormula=``
The formula will be evaluated for each record, and if the result
is not 0, false, "", NaN, [], or #Error! the record will be included
in the response.
If combined with view, only records in that view which satisfy the
formula will be returned. For example, to only include records where
``COLUMN_A`` isn't empty, pass in: ``"NOT({COLUMN_A}='')"``
For more information see
`Airtable Docs on formulas. <https://airtable.com/api>`_
Usage - Text Column is not empty:
>>> airtable.get_all(formula="NOT({COLUMN_A}='')")
Usage - Text Column contains:
>>> airtable.get_all(formula="FIND('SomeSubText', {COLUMN_STR})=1")
Args:
formula (``str``): A valid Airtable formula.
"""
# Class Input > Output
# >>> param = FormulaParams("FIND('DUP', {COLUMN_STR})=1")
# >>> param.to_param_dict()
# {'formula': "FIND('WW')=1"}
param_name = 'filterByFormula'
kwarg = 'formula'
@staticmethod
def from_name_and_value(field_name, field_value):
"""
Creates a formula to match cells from from field_name and value
"""
if isinstance(field_value, str):
field_value = "'{}'".format(field_value)
formula = "{{{name}}}={value}".format(name=field_name,
value=field_value)
return formula
class _OffsetParam(_BaseParam):
"""
Offset Param
Kwargs:
``offset=``
If there are more records what was in the response,
the response body will contain an offset value.
To fetch the next page of records,
include offset in the next request's parameters.
This is used internally by :any:`get_all` and :any:`get_iter`.
Usage:
>>> airtable.get_iter(offset='recjAle5lryYOpMKk')
Args:
record_id (``str``, ``list``):
"""
# Class Input > Output
# >>> filter = _OffsetParam('recqgqThAnETLuH58')
# >>> filter.to_param_dict()
# {'offset: 'recqgqThAnETLuH58'}
param_name = 'offset'
kwarg = param_name
class FieldsParam(_BaseStringArrayParam):
"""
Fields Param
Kwargs:
``fields=``
Only data for fields whose names are in this list will be included in
the records. If you don't need every field, you can use this parameter
to reduce the amount of data transferred.
Usage:
>>> airtable.get(fields='ColumnA')
Multiple Columns:
>>> airtable.get(fields=['ColumnA', 'ColumnB'])
Args:
fields (``str``, ``list``): Name of columns you want to retrieve.
"""
# Class Input > Output
# >>> param = FieldsParam(['FieldOne', 'FieldTwo'])
# >>> param.to_param_dict()
# {'fields[]': ['FieldOne', 'FieldTwo']}
param_name = 'fields'
kwarg = param_name
class SortParam(_BaseObjectArrayParam):
"""
Sort Param
Kwargs:
``sort=``
Specifies how the records will be ordered. If you set the view
parameter, the returned records in that view will be sorted by these
fields.
If sorting by multiple columns, column names can be passed as a list.
Sorting Direction is ascending by default, but can be reversed by
prefixing the column name with a minus sign ``-``, or passing
``COLUMN_NAME, DIRECTION`` tuples. Direction options
are ``asc`` and ``desc``.
Usage:
>>> airtable.get(sort='ColumnA')
Multiple Columns:
>>> airtable.get(sort=['ColumnA', '-ColumnB'])
Explicit Directions:
>>> airtable.get(sort=[('ColumnA', 'asc'), ('ColumnB', 'desc')])
Args:
fields (``str``, ``list``): Name of columns and directions.
"""
# Class Input > Output
# >>> filter = SortParam([{'field': 'col', 'direction': 'asc'}])
# >>> filter.to_param_dict()
# {'sort[0]['field']: 'col', sort[0]['direction']: 'asc'}
param_name = 'sort'
kwarg = param_name
def __init__(self, value):
# Wraps string into list to avoid string iteration
if hasattr(value, 'startswith'):
value = [value]
self.value = []
direction = 'asc'
for item in value:
if not hasattr(item, 'startswith'):
field_name, direction = item
else:
if item.startswith('-'):
direction = 'desc'
field_name = item[1:]
else:
field_name = item
sort_param = {'field': field_name, 'direction': direction}
self.value.append(sort_param)
@classmethod
def _discover_params(cls):
"""
Returns a dict where filter keyword is key, and class is value.
To handle param alias (maxRecords or max_records), both versions are
added.
"""
try:
return cls.filters
except AttributeError:
filters = {}
for param_class_name in dir(cls):
param_class = getattr(cls, param_class_name)
if hasattr(param_class, 'kwarg'):
filters[param_class.kwarg] = param_class
filters[param_class.param_name] = param_class
cls.filters = filters
return cls.filters
@classmethod
def _get(cls, kwarg_name):
""" Returns a Param Class Instance, by its kwarg or param name """
param_classes = cls._discover_params()
try:
param_class = param_classes[kwarg_name]
except KeyError:
raise ValueError('invalid param keyword {}'.format(kwarg_name))
else:
return param_class
|
gtalarico/airtable-python-wrapper
|
airtable/airtable.py
|
Airtable._process_params
|
python
|
def _process_params(self, params):
"""
Process params names or values as needed using filters
"""
new_params = OrderedDict()
for param_name, param_value in sorted(params.items()):
param_value = params[param_name]
ParamClass = AirtableParams._get(param_name)
new_params.update(ParamClass(param_value).to_param_dict())
return new_params
|
Process params names or values as needed using filters
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L141-L150
| null |
class Airtable():
VERSION = 'v0'
API_BASE_URL = 'https://api.airtable.com/'
API_LIMIT = 1.0 / 5 # 5 per second
API_URL = posixpath.join(API_BASE_URL, VERSION)
def __init__(self, base_key, table_name, api_key=None):
"""
If api_key is not provided, :any:`AirtableAuth` will attempt
to use ``os.environ['AIRTABLE_API_KEY']``
"""
session = requests.Session()
session.auth = AirtableAuth(api_key=api_key)
self.session = session
self.table_name = table_name
url_safe_table_name = quote(table_name, safe='')
self.url_table = posixpath.join(self.API_URL, base_key,
url_safe_table_name)
self.is_authenticated = self.validate_session(self.url_table)
def validate_session(self, url):
response = self.session.get(url, params={'maxRecords': 1})
if response.ok:
return True
elif response.status_code == 404:
raise ValueError('Invalid base or table name: {}'.format(url))
else:
raise ValueError(
'Authentication failed: {}'.format(response.reason))
def _process_params(self, params):
"""
Process params names or values as needed using filters
"""
new_params = OrderedDict()
for param_name, param_value in sorted(params.items()):
param_value = params[param_name]
ParamClass = AirtableParams._get(param_name)
new_params.update(ParamClass(param_value).to_param_dict())
return new_params
def _process_response(self, response):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
err_msg = str(exc)
# Reports Decoded 422 Url for better troubleshooting
# Disabled in IronPython Bug:
# https://github.com/IronLanguages/ironpython2/issues/242
if not IS_IPY and response.status_code == 422:
err_msg = err_msg.replace(response.url, unquote(response.url))
err_msg += (' (Decoded URL)')
# Attempt to get Error message from response, Issue #16
try:
error_dict = response.json()
except json.decoder.JSONDecodeError:
pass
else:
if 'error' in error_dict:
err_msg += ' [Error: {}]'.format(error_dict['error'])
raise requests.exceptions.HTTPError(err_msg)
else:
return response.json()
def record_url(self, record_id):
""" Builds URL with record id """
return posixpath.join(self.url_table, record_id)
def _request(self, method, url, params=None, json_data=None):
response = self.session.request(method, url, params=params,
json=json_data)
return self._process_response(response)
def _get(self, url, **params):
processed_params = self._process_params(params)
return self._request('get', url, params=processed_params)
def _post(self, url, json_data):
return self._request('post', url, json_data=json_data)
def _put(self, url, json_data):
return self._request('put', url, json_data=json_data)
def _patch(self, url, json_data):
return self._request('patch', url, json_data=json_data)
def _delete(self, url):
return self._request('delete', url)
def get(self, record_id):
"""
Retrieves a record by its id
>>> record = airtable.get('recwPQIfs4wKPyc9D')
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Record
"""
record_url = self.record_url(record_id)
return self._get(record_url)
def get_iter(self, **options):
"""
Record Retriever Iterator
Returns iterator with lists in batches according to pageSize.
To get all records at once use :any:`get_all`
>>> for page in airtable.get_iter():
... for record in page:
... print(record)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
page_size (``int``, optional ): The number of records returned
in each request. Must be less than or equal to 100.
Default is 100. See :any:`PageSizeParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
iterator (``list``): List of Records, grouped by pageSize
"""
offset = None
while True:
data = self._get(self.url_table, offset=offset, **options)
records = data.get('records', [])
time.sleep(self.API_LIMIT)
yield records
offset = data.get('offset')
if not offset:
break
def get_all(self, **options):
"""
Retrieves all records repetitively and returns a single list.
>>> airtable.get_all()
>>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
>>> airtable.get_all(maxRecords=50)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
records (``list``): List of Records
>>> records = get_all(maxRecords=3, view='All')
"""
all_records = []
for records in self.get_iter(**options):
all_records.extend(records)
return all_records
def match(self, field_name, field_value, **options):
"""
Returns first match found in :any:`get_all`
>>> airtable.match('Name', 'John')
{'fields': {'Name': 'John'} }
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): First record to match the field_value provided
"""
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
for record in self.get_all(**options):
return record
else:
return {}
def search(self, field_name, field_value, record=None, **options):
"""
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
"""
records = []
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
records = self.get_all(**options)
return records
def insert(self, fields, typecast=False):
"""
Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record
"""
return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast})
def _batch_request(self, func, iterable):
""" Internal Function to limit batch calls to API limit """
responses = []
for item in iterable:
responses.append(func(item))
time.sleep(self.API_LIMIT)
return responses
def batch_insert(self, records, typecast=False):
"""
Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
To change the rate limit use ``airtable.API_LIMIT = 0.2``
(5 per second)
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> airtable.batch_insert(records)
Args:
records(``list``): Records to insert
typecast(``boolean``): Automatic data conversion from string values.
Returns:
records (``list``): list of added records
"""
return self._batch_request(self.insert, records)
def update(self, record_id, fields, typecast=False):
"""
Updates a record by its record id.
Only Fields passed are updated, the rest are left as is.
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> fields = {'Status': 'Fired'}
>>> airtable.update(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Updated record
"""
record_url = self.record_url(record_id)
return self._patch(record_url, json_data={"fields": fields, "typecast": typecast})
def update_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Updates the first record to match field name and value.
Only Fields passed are updated, the rest are left as is.
>>> record = {'Name': 'John', 'Tel': '540-255-5522'}
>>> airtable.update_by_field('Name', 'John', record)
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Updated record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.update(record['id'], fields, typecast)
def replace(self, record_id, fields, typecast=False):
"""
Replaces a record by its record id.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
>>> record = airtable.match('Seat Number', '22A')
>>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}
>>> airtable.replace(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): New record
"""
record_url = self.record_url(record_id)
return self._put(record_url, json_data={"fields": fields, "typecast": typecast})
def replace_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Replaces the first record to match field name and value.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): New record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.replace(record['id'], fields, typecast)
def delete(self, record_id):
"""
Deletes a record by its id
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> airtable.delete(record['id'])
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Deleted Record
"""
record_url = self.record_url(record_id)
return self._delete(record_url)
def delete_by_field(self, field_name, field_value, **options):
"""
Deletes first record to match provided ``field_name`` and
``field_value``.
>>> record = airtable.delete_by_field('Employee Id', 'DD13332454')
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Deleted Record
"""
record = self.match(field_name, field_value, **options)
record_url = self.record_url(record['id'])
return self._delete(record_url)
def batch_delete(self, record_ids):
"""
Calls :any:`delete` repetitively, following set API Rate Limit (5/sec)
To change the rate limit set value of ``airtable.API_LIMIT`` to
the time in seconds it should sleep before calling the function again.
>>> record_ids = ['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F']
>>> airtable.batch_delete(records_ids)
Args:
records(``list``): Record Ids to delete
Returns:
records(``list``): list of records deleted
"""
return self._batch_request(self.delete, record_ids)
def mirror(self, records, **options):
"""
Deletes all records on table or view and replaces with records.
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> record = airtable.,mirror(records)
If view options are provided, only records visible on that view will
be deleted.
>>> record = airtable.mirror(records, view='View')
([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])
Args:
records(``list``): Records to insert
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
Returns:
records (``tuple``): (new_records, deleted_records)
"""
all_record_ids = [r['id'] for r in self.get_all(**options)]
deleted_records = self.batch_delete(all_record_ids)
new_records = self.batch_insert(records)
return (new_records, deleted_records)
def __repr__(self):
return '<Airtable table:{}>'.format(self.table_name)
|
gtalarico/airtable-python-wrapper
|
airtable/airtable.py
|
Airtable.get
|
python
|
def get(self, record_id):
"""
Retrieves a record by its id
>>> record = airtable.get('recwPQIfs4wKPyc9D')
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Record
"""
record_url = self.record_url(record_id)
return self._get(record_url)
|
Retrieves a record by its id
>>> record = airtable.get('recwPQIfs4wKPyc9D')
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Record
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L202-L215
|
[
"def record_url(self, record_id):\n \"\"\" Builds URL with record id \"\"\"\n return posixpath.join(self.url_table, record_id)\n",
"def _get(self, url, **params):\n processed_params = self._process_params(params)\n return self._request('get', url, params=processed_params)\n"
] |
class Airtable():
VERSION = 'v0'
API_BASE_URL = 'https://api.airtable.com/'
API_LIMIT = 1.0 / 5 # 5 per second
API_URL = posixpath.join(API_BASE_URL, VERSION)
def __init__(self, base_key, table_name, api_key=None):
"""
If api_key is not provided, :any:`AirtableAuth` will attempt
to use ``os.environ['AIRTABLE_API_KEY']``
"""
session = requests.Session()
session.auth = AirtableAuth(api_key=api_key)
self.session = session
self.table_name = table_name
url_safe_table_name = quote(table_name, safe='')
self.url_table = posixpath.join(self.API_URL, base_key,
url_safe_table_name)
self.is_authenticated = self.validate_session(self.url_table)
def validate_session(self, url):
response = self.session.get(url, params={'maxRecords': 1})
if response.ok:
return True
elif response.status_code == 404:
raise ValueError('Invalid base or table name: {}'.format(url))
else:
raise ValueError(
'Authentication failed: {}'.format(response.reason))
def _process_params(self, params):
"""
Process params names or values as needed using filters
"""
new_params = OrderedDict()
for param_name, param_value in sorted(params.items()):
param_value = params[param_name]
ParamClass = AirtableParams._get(param_name)
new_params.update(ParamClass(param_value).to_param_dict())
return new_params
def _process_response(self, response):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
err_msg = str(exc)
# Reports Decoded 422 Url for better troubleshooting
# Disabled in IronPython Bug:
# https://github.com/IronLanguages/ironpython2/issues/242
if not IS_IPY and response.status_code == 422:
err_msg = err_msg.replace(response.url, unquote(response.url))
err_msg += (' (Decoded URL)')
# Attempt to get Error message from response, Issue #16
try:
error_dict = response.json()
except json.decoder.JSONDecodeError:
pass
else:
if 'error' in error_dict:
err_msg += ' [Error: {}]'.format(error_dict['error'])
raise requests.exceptions.HTTPError(err_msg)
else:
return response.json()
def record_url(self, record_id):
""" Builds URL with record id """
return posixpath.join(self.url_table, record_id)
def _request(self, method, url, params=None, json_data=None):
response = self.session.request(method, url, params=params,
json=json_data)
return self._process_response(response)
def _get(self, url, **params):
processed_params = self._process_params(params)
return self._request('get', url, params=processed_params)
def _post(self, url, json_data):
return self._request('post', url, json_data=json_data)
def _put(self, url, json_data):
return self._request('put', url, json_data=json_data)
def _patch(self, url, json_data):
return self._request('patch', url, json_data=json_data)
def _delete(self, url):
return self._request('delete', url)
def get(self, record_id):
"""
Retrieves a record by its id
>>> record = airtable.get('recwPQIfs4wKPyc9D')
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Record
"""
record_url = self.record_url(record_id)
return self._get(record_url)
def get_iter(self, **options):
"""
Record Retriever Iterator
Returns iterator with lists in batches according to pageSize.
To get all records at once use :any:`get_all`
>>> for page in airtable.get_iter():
... for record in page:
... print(record)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
page_size (``int``, optional ): The number of records returned
in each request. Must be less than or equal to 100.
Default is 100. See :any:`PageSizeParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
iterator (``list``): List of Records, grouped by pageSize
"""
offset = None
while True:
data = self._get(self.url_table, offset=offset, **options)
records = data.get('records', [])
time.sleep(self.API_LIMIT)
yield records
offset = data.get('offset')
if not offset:
break
def get_all(self, **options):
"""
Retrieves all records repetitively and returns a single list.
>>> airtable.get_all()
>>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
>>> airtable.get_all(maxRecords=50)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
records (``list``): List of Records
>>> records = get_all(maxRecords=3, view='All')
"""
all_records = []
for records in self.get_iter(**options):
all_records.extend(records)
return all_records
def match(self, field_name, field_value, **options):
"""
Returns first match found in :any:`get_all`
>>> airtable.match('Name', 'John')
{'fields': {'Name': 'John'} }
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): First record to match the field_value provided
"""
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
for record in self.get_all(**options):
return record
else:
return {}
def search(self, field_name, field_value, record=None, **options):
"""
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
"""
records = []
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
records = self.get_all(**options)
return records
def insert(self, fields, typecast=False):
"""
Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record
"""
return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast})
def _batch_request(self, func, iterable):
""" Internal Function to limit batch calls to API limit """
responses = []
for item in iterable:
responses.append(func(item))
time.sleep(self.API_LIMIT)
return responses
def batch_insert(self, records, typecast=False):
"""
Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
To change the rate limit use ``airtable.API_LIMIT = 0.2``
(5 per second)
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> airtable.batch_insert(records)
Args:
records(``list``): Records to insert
typecast(``boolean``): Automatic data conversion from string values.
Returns:
records (``list``): list of added records
"""
return self._batch_request(self.insert, records)
def update(self, record_id, fields, typecast=False):
"""
Updates a record by its record id.
Only Fields passed are updated, the rest are left as is.
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> fields = {'Status': 'Fired'}
>>> airtable.update(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Updated record
"""
record_url = self.record_url(record_id)
return self._patch(record_url, json_data={"fields": fields, "typecast": typecast})
def update_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Updates the first record to match field name and value.
Only Fields passed are updated, the rest are left as is.
>>> record = {'Name': 'John', 'Tel': '540-255-5522'}
>>> airtable.update_by_field('Name', 'John', record)
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Updated record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.update(record['id'], fields, typecast)
def replace(self, record_id, fields, typecast=False):
"""
Replaces a record by its record id.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
>>> record = airtable.match('Seat Number', '22A')
>>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}
>>> airtable.replace(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): New record
"""
record_url = self.record_url(record_id)
return self._put(record_url, json_data={"fields": fields, "typecast": typecast})
def replace_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Replaces the first record to match field name and value.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): New record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.replace(record['id'], fields, typecast)
def delete(self, record_id):
"""
Deletes a record by its id
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> airtable.delete(record['id'])
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Deleted Record
"""
record_url = self.record_url(record_id)
return self._delete(record_url)
def delete_by_field(self, field_name, field_value, **options):
"""
Deletes first record to match provided ``field_name`` and
``field_value``.
>>> record = airtable.delete_by_field('Employee Id', 'DD13332454')
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Deleted Record
"""
record = self.match(field_name, field_value, **options)
record_url = self.record_url(record['id'])
return self._delete(record_url)
def batch_delete(self, record_ids):
"""
Calls :any:`delete` repetitively, following set API Rate Limit (5/sec)
To change the rate limit set value of ``airtable.API_LIMIT`` to
the time in seconds it should sleep before calling the function again.
>>> record_ids = ['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F']
>>> airtable.batch_delete(records_ids)
Args:
records(``list``): Record Ids to delete
Returns:
records(``list``): list of records deleted
"""
return self._batch_request(self.delete, record_ids)
def mirror(self, records, **options):
"""
Deletes all records on table or view and replaces with records.
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> record = airtable.,mirror(records)
If view options are provided, only records visible on that view will
be deleted.
>>> record = airtable.mirror(records, view='View')
([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])
Args:
records(``list``): Records to insert
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
Returns:
records (``tuple``): (new_records, deleted_records)
"""
all_record_ids = [r['id'] for r in self.get_all(**options)]
deleted_records = self.batch_delete(all_record_ids)
new_records = self.batch_insert(records)
return (new_records, deleted_records)
def __repr__(self):
return '<Airtable table:{}>'.format(self.table_name)
|
gtalarico/airtable-python-wrapper
|
airtable/airtable.py
|
Airtable.get_iter
|
python
|
def get_iter(self, **options):
"""
Record Retriever Iterator
Returns iterator with lists in batches according to pageSize.
To get all records at once use :any:`get_all`
>>> for page in airtable.get_iter():
... for record in page:
... print(record)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
page_size (``int``, optional ): The number of records returned
in each request. Must be less than or equal to 100.
Default is 100. See :any:`PageSizeParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
iterator (``list``): List of Records, grouped by pageSize
"""
offset = None
while True:
data = self._get(self.url_table, offset=offset, **options)
records = data.get('records', [])
time.sleep(self.API_LIMIT)
yield records
offset = data.get('offset')
if not offset:
break
|
Record Retriever Iterator
Returns iterator with lists in batches according to pageSize.
To get all records at once use :any:`get_all`
>>> for page in airtable.get_iter():
... for record in page:
... print(record)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
page_size (``int``, optional ): The number of records returned
in each request. Must be less than or equal to 100.
Default is 100. See :any:`PageSizeParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
iterator (``list``): List of Records, grouped by pageSize
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L217-L256
|
[
"def _get(self, url, **params):\n processed_params = self._process_params(params)\n return self._request('get', url, params=processed_params)\n"
] |
class Airtable():
VERSION = 'v0'
API_BASE_URL = 'https://api.airtable.com/'
API_LIMIT = 1.0 / 5 # 5 per second
API_URL = posixpath.join(API_BASE_URL, VERSION)
def __init__(self, base_key, table_name, api_key=None):
"""
If api_key is not provided, :any:`AirtableAuth` will attempt
to use ``os.environ['AIRTABLE_API_KEY']``
"""
session = requests.Session()
session.auth = AirtableAuth(api_key=api_key)
self.session = session
self.table_name = table_name
url_safe_table_name = quote(table_name, safe='')
self.url_table = posixpath.join(self.API_URL, base_key,
url_safe_table_name)
self.is_authenticated = self.validate_session(self.url_table)
def validate_session(self, url):
response = self.session.get(url, params={'maxRecords': 1})
if response.ok:
return True
elif response.status_code == 404:
raise ValueError('Invalid base or table name: {}'.format(url))
else:
raise ValueError(
'Authentication failed: {}'.format(response.reason))
def _process_params(self, params):
"""
Process params names or values as needed using filters
"""
new_params = OrderedDict()
for param_name, param_value in sorted(params.items()):
param_value = params[param_name]
ParamClass = AirtableParams._get(param_name)
new_params.update(ParamClass(param_value).to_param_dict())
return new_params
def _process_response(self, response):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
err_msg = str(exc)
# Reports Decoded 422 Url for better troubleshooting
# Disabled in IronPython Bug:
# https://github.com/IronLanguages/ironpython2/issues/242
if not IS_IPY and response.status_code == 422:
err_msg = err_msg.replace(response.url, unquote(response.url))
err_msg += (' (Decoded URL)')
# Attempt to get Error message from response, Issue #16
try:
error_dict = response.json()
except json.decoder.JSONDecodeError:
pass
else:
if 'error' in error_dict:
err_msg += ' [Error: {}]'.format(error_dict['error'])
raise requests.exceptions.HTTPError(err_msg)
else:
return response.json()
def record_url(self, record_id):
""" Builds URL with record id """
return posixpath.join(self.url_table, record_id)
def _request(self, method, url, params=None, json_data=None):
response = self.session.request(method, url, params=params,
json=json_data)
return self._process_response(response)
def _get(self, url, **params):
processed_params = self._process_params(params)
return self._request('get', url, params=processed_params)
def _post(self, url, json_data):
return self._request('post', url, json_data=json_data)
def _put(self, url, json_data):
return self._request('put', url, json_data=json_data)
def _patch(self, url, json_data):
return self._request('patch', url, json_data=json_data)
def _delete(self, url):
return self._request('delete', url)
def get(self, record_id):
"""
Retrieves a record by its id
>>> record = airtable.get('recwPQIfs4wKPyc9D')
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Record
"""
record_url = self.record_url(record_id)
return self._get(record_url)
def get_iter(self, **options):
"""
Record Retriever Iterator
Returns iterator with lists in batches according to pageSize.
To get all records at once use :any:`get_all`
>>> for page in airtable.get_iter():
... for record in page:
... print(record)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
page_size (``int``, optional ): The number of records returned
in each request. Must be less than or equal to 100.
Default is 100. See :any:`PageSizeParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
iterator (``list``): List of Records, grouped by pageSize
"""
offset = None
while True:
data = self._get(self.url_table, offset=offset, **options)
records = data.get('records', [])
time.sleep(self.API_LIMIT)
yield records
offset = data.get('offset')
if not offset:
break
def get_all(self, **options):
"""
Retrieves all records repetitively and returns a single list.
>>> airtable.get_all()
>>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
>>> airtable.get_all(maxRecords=50)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
records (``list``): List of Records
>>> records = get_all(maxRecords=3, view='All')
"""
all_records = []
for records in self.get_iter(**options):
all_records.extend(records)
return all_records
def match(self, field_name, field_value, **options):
"""
Returns first match found in :any:`get_all`
>>> airtable.match('Name', 'John')
{'fields': {'Name': 'John'} }
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): First record to match the field_value provided
"""
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
for record in self.get_all(**options):
return record
else:
return {}
def search(self, field_name, field_value, record=None, **options):
"""
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
"""
records = []
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
records = self.get_all(**options)
return records
def insert(self, fields, typecast=False):
"""
Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record
"""
return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast})
def _batch_request(self, func, iterable):
""" Internal Function to limit batch calls to API limit """
responses = []
for item in iterable:
responses.append(func(item))
time.sleep(self.API_LIMIT)
return responses
def batch_insert(self, records, typecast=False):
"""
Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
To change the rate limit use ``airtable.API_LIMIT = 0.2``
(5 per second)
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> airtable.batch_insert(records)
Args:
records(``list``): Records to insert
typecast(``boolean``): Automatic data conversion from string values.
Returns:
records (``list``): list of added records
"""
return self._batch_request(self.insert, records)
def update(self, record_id, fields, typecast=False):
"""
Updates a record by its record id.
Only Fields passed are updated, the rest are left as is.
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> fields = {'Status': 'Fired'}
>>> airtable.update(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Updated record
"""
record_url = self.record_url(record_id)
return self._patch(record_url, json_data={"fields": fields, "typecast": typecast})
def update_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Updates the first record to match field name and value.
Only Fields passed are updated, the rest are left as is.
>>> record = {'Name': 'John', 'Tel': '540-255-5522'}
>>> airtable.update_by_field('Name', 'John', record)
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Updated record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.update(record['id'], fields, typecast)
def replace(self, record_id, fields, typecast=False):
"""
Replaces a record by its record id.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
>>> record = airtable.match('Seat Number', '22A')
>>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}
>>> airtable.replace(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): New record
"""
record_url = self.record_url(record_id)
return self._put(record_url, json_data={"fields": fields, "typecast": typecast})
def replace_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Replaces the first record to match field name and value.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): New record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.replace(record['id'], fields, typecast)
def delete(self, record_id):
"""
Deletes a record by its id
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> airtable.delete(record['id'])
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Deleted Record
"""
record_url = self.record_url(record_id)
return self._delete(record_url)
def delete_by_field(self, field_name, field_value, **options):
"""
Deletes first record to match provided ``field_name`` and
``field_value``.
>>> record = airtable.delete_by_field('Employee Id', 'DD13332454')
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Deleted Record
"""
record = self.match(field_name, field_value, **options)
record_url = self.record_url(record['id'])
return self._delete(record_url)
def batch_delete(self, record_ids):
"""
Calls :any:`delete` repetitively, following set API Rate Limit (5/sec)
To change the rate limit set value of ``airtable.API_LIMIT`` to
the time in seconds it should sleep before calling the function again.
>>> record_ids = ['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F']
>>> airtable.batch_delete(records_ids)
Args:
records(``list``): Record Ids to delete
Returns:
records(``list``): list of records deleted
"""
return self._batch_request(self.delete, record_ids)
def mirror(self, records, **options):
"""
Deletes all records on table or view and replaces with records.
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> record = airtable.,mirror(records)
If view options are provided, only records visible on that view will
be deleted.
>>> record = airtable.mirror(records, view='View')
([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])
Args:
records(``list``): Records to insert
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
Returns:
records (``tuple``): (new_records, deleted_records)
"""
all_record_ids = [r['id'] for r in self.get_all(**options)]
deleted_records = self.batch_delete(all_record_ids)
new_records = self.batch_insert(records)
return (new_records, deleted_records)
def __repr__(self):
return '<Airtable table:{}>'.format(self.table_name)
|
gtalarico/airtable-python-wrapper
|
airtable/airtable.py
|
Airtable.get_all
|
python
|
def get_all(self, **options):
"""
Retrieves all records repetitively and returns a single list.
>>> airtable.get_all()
>>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
>>> airtable.get_all(maxRecords=50)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
records (``list``): List of Records
>>> records = get_all(maxRecords=3, view='All')
"""
all_records = []
for records in self.get_iter(**options):
all_records.extend(records)
return all_records
|
Retrieves all records repetitively and returns a single list.
>>> airtable.get_all()
>>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
>>> airtable.get_all(maxRecords=50)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
records (``list``): List of Records
>>> records = get_all(maxRecords=3, view='All')
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L258-L288
|
[
"def get_iter(self, **options):\n \"\"\"\n Record Retriever Iterator\n\n Returns iterator with lists in batches according to pageSize.\n To get all records at once use :any:`get_all`\n\n >>> for page in airtable.get_iter():\n ... for record in page:\n ... print(record)\n [{'fields': ... }, ...]\n\n Keyword Args:\n max_records (``int``, optional): The maximum total number of\n records that will be returned. See :any:`MaxRecordsParam`\n view (``str``, optional): The name or ID of a view.\n See :any:`ViewParam`.\n page_size (``int``, optional ): The number of records returned\n in each request. Must be less than or equal to 100.\n Default is 100. See :any:`PageSizeParam`.\n fields (``str``, ``list``, optional): Name of field or fields to\n be retrieved. Default is all fields. See :any:`FieldsParam`.\n sort (``list``, optional): List of fields to sort by.\n Default order is ascending. See :any:`SortParam`.\n formula (``str``, optional): Airtable formula.\n See :any:`FormulaParam`.\n\n Returns:\n iterator (``list``): List of Records, grouped by pageSize\n\n \"\"\"\n offset = None\n while True:\n data = self._get(self.url_table, offset=offset, **options)\n records = data.get('records', [])\n time.sleep(self.API_LIMIT)\n yield records\n offset = data.get('offset')\n if not offset:\n break\n"
] |
class Airtable():
    """Client bound to one Airtable base and table.

    Wraps the Airtable REST API (list / insert / update / replace /
    delete) and provides batched variants that sleep ``API_LIMIT``
    seconds between calls to respect the public rate limit (5 req/sec).
    """

    VERSION = 'v0'
    API_BASE_URL = 'https://api.airtable.com/'
    API_LIMIT = 1.0 / 5  # 5 per second
    API_URL = posixpath.join(API_BASE_URL, VERSION)

    def __init__(self, base_key, table_name, api_key=None):
        """
        If api_key is not provided, :any:`AirtableAuth` will attempt
        to use ``os.environ['AIRTABLE_API_KEY']``
        """
        session = requests.Session()
        session.auth = AirtableAuth(api_key=api_key)
        self.session = session
        self.table_name = table_name
        # Percent-encode everything (safe='') so table names containing
        # spaces or slashes still form a single valid URL path segment.
        url_safe_table_name = quote(table_name, safe='')
        self.url_table = posixpath.join(self.API_URL, base_key,
                                        url_safe_table_name)
        self.is_authenticated = self.validate_session(self.url_table)

    def validate_session(self, url):
        """Probe the table URL with a one-record request.

        Returns ``True`` on success; raises ``ValueError`` for a missing
        base/table (404) or any other failure (treated as bad auth).
        """
        response = self.session.get(url, params={'maxRecords': 1})
        if response.ok:
            return True
        elif response.status_code == 404:
            raise ValueError('Invalid base or table name: {}'.format(url))
        else:
            raise ValueError(
                'Authentication failed: {}'.format(response.reason))

    def _process_params(self, params):
        """
        Process params names or values as needed using filters
        """
        new_params = OrderedDict()
        # Sorted iteration keeps the generated query string deterministic.
        for param_name, param_value in sorted(params.items()):
            ParamClass = AirtableParams._get(param_name)
            new_params.update(ParamClass(param_value).to_param_dict())
        return new_params

    def _process_response(self, response):
        """Return the decoded JSON body, or raise ``HTTPError`` enriched
        with a decoded URL (on 422) and the API's own error message."""
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as exc:
            err_msg = str(exc)
            # Reports Decoded 422 Url for better troubleshooting
            # Disabled in IronPython Bug:
            # https://github.com/IronLanguages/ironpython2/issues/242
            if not IS_IPY and response.status_code == 422:
                err_msg = err_msg.replace(response.url, unquote(response.url))
                err_msg += (' (Decoded URL)')
            # Attempt to get Error message from response, Issue #16
            try:
                error_dict = response.json()
            except json.decoder.JSONDecodeError:
                pass
            else:
                if 'error' in error_dict:
                    err_msg += ' [Error: {}]'.format(error_dict['error'])
            raise requests.exceptions.HTTPError(err_msg)
        else:
            return response.json()

    def record_url(self, record_id):
        """ Builds URL with record id """
        return posixpath.join(self.url_table, record_id)

    def _request(self, method, url, params=None, json_data=None):
        # Single choke point for all HTTP traffic.
        response = self.session.request(method, url, params=params,
                                        json=json_data)
        return self._process_response(response)

    def _get(self, url, **params):
        processed_params = self._process_params(params)
        return self._request('get', url, params=processed_params)

    def _post(self, url, json_data):
        return self._request('post', url, json_data=json_data)

    def _put(self, url, json_data):
        return self._request('put', url, json_data=json_data)

    def _patch(self, url, json_data):
        return self._request('patch', url, json_data=json_data)

    def _delete(self, url):
        return self._request('delete', url)

    def get(self, record_id):
        """
        Retrieves a record by its id

        >>> record = airtable.get('recwPQIfs4wKPyc9D')

        Args:
            record_id(``str``): Airtable record id

        Returns:
            record (``dict``): Record
        """
        record_url = self.record_url(record_id)
        return self._get(record_url)

    def get_iter(self, **options):
        """
        Record Retriever Iterator

        Yields record pages until the API stops returning an ``offset``
        token. To get all records at once use :any:`get_all`.

        >>> for page in airtable.get_iter():
        ...     for record in page:
        ...         print(record)

        Keyword Args:
            max_records (``int``, optional): Maximum total records returned.
                See :any:`MaxRecordsParam`.
            view (``str``, optional): Name or ID of a view. See :any:`ViewParam`.
            page_size (``int``, optional): Records per request, <= 100
                (default 100). See :any:`PageSizeParam`.
            fields (``str``, ``list``, optional): Field(s) to retrieve.
                Default is all fields. See :any:`FieldsParam`.
            sort (``list``, optional): Fields to sort by. See :any:`SortParam`.
            formula (``str``, optional): Airtable formula. See :any:`FormulaParam`.

        Returns:
            iterator (``list``): List of Records, grouped by pageSize
        """
        offset = None
        while True:
            data = self._get(self.url_table, offset=offset, **options)
            records = data.get('records', [])
            time.sleep(self.API_LIMIT)  # stay under the API rate limit
            yield records
            offset = data.get('offset')
            if not offset:
                break

    def get_all(self, **options):
        """
        Retrieves all records repetitively and returns a single list.

        >>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
        >>> airtable.get_all(maxRecords=50)

        Keyword Args:
            max_records, view, fields, sort, formula: see :any:`get_iter`.

        Returns:
            records (``list``): List of Records
        """
        all_records = []
        for records in self.get_iter(**options):
            all_records.extend(records)
        return all_records

    def match(self, field_name, field_value, **options):
        """
        Returns first match found in :any:`get_all`

        >>> airtable.match('Name', 'John')
        {'fields': {'Name': 'John'} }

        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.

        Keyword Args:
            max_records, view, fields, sort: see :any:`get_iter`.

        Returns:
            record (``dict``): First record to match the field_value
                provided, or ``{}`` when nothing matches.
        """
        from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
        formula = from_name_and_value(field_name, field_value)
        options['formula'] = formula
        # for/else: the else clause runs only when the loop body never did,
        # i.e. when no record matched.
        for record in self.get_all(**options):
            return record
        else:
            return {}

    def search(self, field_name, field_value, record=None, **options):
        """
        Returns all matching records found in :any:`get_all`

        >>> airtable.search('Gender', 'Male')
        [{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]

        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.
            record: unused; kept only for backward compatibility.

        Keyword Args:
            max_records, view, fields, sort: see :any:`get_iter`.

        Returns:
            records (``list``): All records that matched ``field_value``
        """
        from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
        options['formula'] = from_name_and_value(field_name, field_value)
        return self.get_all(**options)

    def insert(self, fields, typecast=False):
        """
        Inserts a record

        >>> record = {'Name': 'John'}
        >>> airtable.insert(record)

        Args:
            fields(``dict``): Fields to insert.
                Must be dictionary with Column names as Key.
            typecast(``boolean``): Automatic data conversion from string values.

        Returns:
            record (``dict``): Inserted record
        """
        return self._post(self.url_table,
                          json_data={"fields": fields, "typecast": typecast})

    def _batch_request(self, func, iterable):
        """ Internal Function to limit batch calls to API limit """
        responses = []
        for item in iterable:
            responses.append(func(item))
            time.sleep(self.API_LIMIT)
        return responses

    def batch_insert(self, records, typecast=False):
        """
        Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)

        To change the rate limit use ``airtable.API_LIMIT = 0.2``

        >>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
        >>> airtable.batch_insert(records)

        Args:
            records(``list``): Records to insert
            typecast(``boolean``): Automatic data conversion from string values.

        Returns:
            records (``list``): list of added records
        """
        # Bind ``typecast`` so every batched insert honours it; previously
        # the parameter was accepted but silently dropped.
        def insert_one(fields):
            return self.insert(fields, typecast=typecast)
        return self._batch_request(insert_one, records)

    def update(self, record_id, fields, typecast=False):
        """
        Updates a record by its record id.
        Only Fields passed are updated, the rest are left as is.

        >>> record = airtable.match('Employee Id', 'DD13332454')
        >>> airtable.update(record['id'], {'Status': 'Fired'})

        Args:
            record_id(``str``): Id of Record to update
            fields(``dict``): Fields to update.
                Must be dictionary with Column names as Key
            typecast(``boolean``): Automatic data conversion from string values.

        Returns:
            record (``dict``): Updated record
        """
        record_url = self.record_url(record_id)
        return self._patch(record_url,
                           json_data={"fields": fields, "typecast": typecast})

    def update_by_field(self, field_name, field_value, fields,
                        typecast=False, **options):
        """
        Updates the first record to match field name and value.
        Only Fields passed are updated, the rest are left as is.

        >>> airtable.update_by_field('Name', 'John', {'Tel': '540-255-5522'})

        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.
            fields(``dict``): Fields to update.
                Must be dictionary with Column names as Key
            typecast(``boolean``): Automatic data conversion from string values.

        Keyword Args:
            view, sort: see :any:`get_iter`.

        Returns:
            record (``dict``): Updated record, or ``{}`` if no match.
        """
        record = self.match(field_name, field_value, **options)
        return {} if not record else self.update(record['id'], fields, typecast)

    def replace(self, record_id, fields, typecast=False):
        """
        Replaces a record by its record id.
        All Fields are updated to match the new ``fields`` provided.
        If a field is not included in ``fields``, value will be set to null.
        To update only selected fields, use :any:`update`.

        >>> record = airtable.match('Seat Number', '22A')
        >>> airtable.replace(record['id'], {'PassangerName': 'Mike'})

        Args:
            record_id(``str``): Id of Record to update
            fields(``dict``): Fields to replace with.
                Must be dictionary with Column names as Key.
            typecast(``boolean``): Automatic data conversion from string values.

        Returns:
            record (``dict``): New record
        """
        record_url = self.record_url(record_id)
        return self._put(record_url,
                         json_data={"fields": fields, "typecast": typecast})

    def replace_by_field(self, field_name, field_value, fields,
                         typecast=False, **options):
        """
        Replaces the first record to match field name and value.
        All Fields are updated to match the new ``fields`` provided.
        If a field is not included in ``fields``, value will be set to null.
        To update only selected fields, use :any:`update`.

        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.
            fields(``dict``): Fields to replace with.
                Must be dictionary with Column names as Key.
            typecast(``boolean``): Automatic data conversion from string values.

        Keyword Args:
            view, sort: see :any:`get_iter`.

        Returns:
            record (``dict``): New record, or ``{}`` if no match.
        """
        record = self.match(field_name, field_value, **options)
        return {} if not record else self.replace(record['id'], fields, typecast)

    def delete(self, record_id):
        """
        Deletes a record by its id

        >>> record = airtable.match('Employee Id', 'DD13332454')
        >>> airtable.delete(record['id'])

        Args:
            record_id(``str``): Airtable record id

        Returns:
            record (``dict``): Deleted Record
        """
        record_url = self.record_url(record_id)
        return self._delete(record_url)

    def delete_by_field(self, field_name, field_value, **options):
        """
        Deletes first record to match provided ``field_name`` and
        ``field_value``.

        >>> record = airtable.delete_by_field('Employee Id', 'DD13332454')

        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.

        Keyword Args:
            view, sort: see :any:`get_iter`.

        Returns:
            record (``dict``): Deleted Record
        """
        record = self.match(field_name, field_value, **options)
        # NOTE(review): raises KeyError when nothing matches (match()
        # returns {}) — confirm whether callers rely on that before
        # softening it.
        record_url = self.record_url(record['id'])
        return self._delete(record_url)

    def batch_delete(self, record_ids):
        """
        Calls :any:`delete` repetitively, following set API Rate Limit (5/sec)

        To change the rate limit set value of ``airtable.API_LIMIT`` to
        the time in seconds it should sleep before calling the function again.

        >>> airtable.batch_delete(['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F'])

        Args:
            records(``list``): Record Ids to delete

        Returns:
            records(``list``): list of records deleted
        """
        return self._batch_request(self.delete, record_ids)

    def mirror(self, records, **options):
        """
        Deletes all records on table or view and replaces with records.

        >>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
        >>> record = airtable.mirror(records)

        If view options are provided, only records visible on that view will
        be deleted.

        >>> record = airtable.mirror(records, view='View')
        ([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])

        Args:
            records(``list``): Records to insert

        Keyword Args:
            max_records (``int``, optional): Maximum total records returned.
            view (``str``, optional): Name or ID of a view.

        Returns:
            records (``tuple``): (new_records, deleted_records)
        """
        all_record_ids = [r['id'] for r in self.get_all(**options)]
        deleted_records = self.batch_delete(all_record_ids)
        new_records = self.batch_insert(records)
        return (new_records, deleted_records)

    def __repr__(self):
        return '<Airtable table:{}>'.format(self.table_name)
|
gtalarico/airtable-python-wrapper
|
airtable/airtable.py
|
Airtable.match
|
python
|
def match(self, field_name, field_value, **options):
"""
Returns first match found in :any:`get_all`
>>> airtable.match('Name', 'John')
{'fields': {'Name': 'John'} }
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): First record to match the field_value provided
"""
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
for record in self.get_all(**options):
return record
else:
return {}
|
Returns first match found in :any:`get_all`
>>> airtable.match('Name', 'John')
{'fields': {'Name': 'John'} }
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): First record to match the field_value provided
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L290-L320
|
[
"def get_all(self, **options):\n \"\"\"\n Retrieves all records repetitively and returns a single list.\n\n >>> airtable.get_all()\n >>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])\n >>> airtable.get_all(maxRecords=50)\n [{'fields': ... }, ...]\n\n Keyword Args:\n max_records (``int``, optional): The maximum total number of\n records that will be returned. See :any:`MaxRecordsParam`\n view (``str``, optional): The name or ID of a view.\n See :any:`ViewParam`.\n fields (``str``, ``list``, optional): Name of field or fields to\n be retrieved. Default is all fields. See :any:`FieldsParam`.\n sort (``list``, optional): List of fields to sort by.\n Default order is ascending. See :any:`SortParam`.\n formula (``str``, optional): Airtable formula.\n See :any:`FormulaParam`.\n\n Returns:\n records (``list``): List of Records\n\n >>> records = get_all(maxRecords=3, view='All')\n\n \"\"\"\n all_records = []\n for records in self.get_iter(**options):\n all_records.extend(records)\n return all_records\n",
"def from_name_and_value(field_name, field_value):\n \"\"\"\n Creates a formula to match cells from from field_name and value\n \"\"\"\n if isinstance(field_value, str):\n field_value = \"'{}'\".format(field_value)\n\n formula = \"{{{name}}}={value}\".format(name=field_name,\n value=field_value)\n return formula\n"
] |
class Airtable():
    """Client bound to one Airtable base and table.

    Wraps the Airtable REST API (list / insert / update / replace /
    delete) and provides batched variants that sleep ``API_LIMIT``
    seconds between calls to respect the public rate limit (5 req/sec).
    """

    VERSION = 'v0'
    API_BASE_URL = 'https://api.airtable.com/'
    API_LIMIT = 1.0 / 5  # 5 per second
    API_URL = posixpath.join(API_BASE_URL, VERSION)

    def __init__(self, base_key, table_name, api_key=None):
        """
        If api_key is not provided, :any:`AirtableAuth` will attempt
        to use ``os.environ['AIRTABLE_API_KEY']``
        """
        session = requests.Session()
        session.auth = AirtableAuth(api_key=api_key)
        self.session = session
        self.table_name = table_name
        # Percent-encode everything (safe='') so table names containing
        # spaces or slashes still form a single valid URL path segment.
        url_safe_table_name = quote(table_name, safe='')
        self.url_table = posixpath.join(self.API_URL, base_key,
                                        url_safe_table_name)
        self.is_authenticated = self.validate_session(self.url_table)

    def validate_session(self, url):
        """Probe the table URL with a one-record request.

        Returns ``True`` on success; raises ``ValueError`` for a missing
        base/table (404) or any other failure (treated as bad auth).
        """
        response = self.session.get(url, params={'maxRecords': 1})
        if response.ok:
            return True
        elif response.status_code == 404:
            raise ValueError('Invalid base or table name: {}'.format(url))
        else:
            raise ValueError(
                'Authentication failed: {}'.format(response.reason))

    def _process_params(self, params):
        """
        Process params names or values as needed using filters
        """
        new_params = OrderedDict()
        # Sorted iteration keeps the generated query string deterministic.
        for param_name, param_value in sorted(params.items()):
            ParamClass = AirtableParams._get(param_name)
            new_params.update(ParamClass(param_value).to_param_dict())
        return new_params

    def _process_response(self, response):
        """Return the decoded JSON body, or raise ``HTTPError`` enriched
        with a decoded URL (on 422) and the API's own error message."""
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as exc:
            err_msg = str(exc)
            # Reports Decoded 422 Url for better troubleshooting
            # Disabled in IronPython Bug:
            # https://github.com/IronLanguages/ironpython2/issues/242
            if not IS_IPY and response.status_code == 422:
                err_msg = err_msg.replace(response.url, unquote(response.url))
                err_msg += (' (Decoded URL)')
            # Attempt to get Error message from response, Issue #16
            try:
                error_dict = response.json()
            except json.decoder.JSONDecodeError:
                pass
            else:
                if 'error' in error_dict:
                    err_msg += ' [Error: {}]'.format(error_dict['error'])
            raise requests.exceptions.HTTPError(err_msg)
        else:
            return response.json()

    def record_url(self, record_id):
        """ Builds URL with record id """
        return posixpath.join(self.url_table, record_id)

    def _request(self, method, url, params=None, json_data=None):
        # Single choke point for all HTTP traffic.
        response = self.session.request(method, url, params=params,
                                        json=json_data)
        return self._process_response(response)

    def _get(self, url, **params):
        processed_params = self._process_params(params)
        return self._request('get', url, params=processed_params)

    def _post(self, url, json_data):
        return self._request('post', url, json_data=json_data)

    def _put(self, url, json_data):
        return self._request('put', url, json_data=json_data)

    def _patch(self, url, json_data):
        return self._request('patch', url, json_data=json_data)

    def _delete(self, url):
        return self._request('delete', url)

    def get(self, record_id):
        """
        Retrieves a record by its id

        >>> record = airtable.get('recwPQIfs4wKPyc9D')

        Args:
            record_id(``str``): Airtable record id

        Returns:
            record (``dict``): Record
        """
        record_url = self.record_url(record_id)
        return self._get(record_url)

    def get_iter(self, **options):
        """
        Record Retriever Iterator

        Yields record pages until the API stops returning an ``offset``
        token. To get all records at once use :any:`get_all`.

        >>> for page in airtable.get_iter():
        ...     for record in page:
        ...         print(record)

        Keyword Args:
            max_records (``int``, optional): Maximum total records returned.
                See :any:`MaxRecordsParam`.
            view (``str``, optional): Name or ID of a view. See :any:`ViewParam`.
            page_size (``int``, optional): Records per request, <= 100
                (default 100). See :any:`PageSizeParam`.
            fields (``str``, ``list``, optional): Field(s) to retrieve.
                Default is all fields. See :any:`FieldsParam`.
            sort (``list``, optional): Fields to sort by. See :any:`SortParam`.
            formula (``str``, optional): Airtable formula. See :any:`FormulaParam`.

        Returns:
            iterator (``list``): List of Records, grouped by pageSize
        """
        offset = None
        while True:
            data = self._get(self.url_table, offset=offset, **options)
            records = data.get('records', [])
            time.sleep(self.API_LIMIT)  # stay under the API rate limit
            yield records
            offset = data.get('offset')
            if not offset:
                break

    def get_all(self, **options):
        """
        Retrieves all records repetitively and returns a single list.

        >>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
        >>> airtable.get_all(maxRecords=50)

        Keyword Args:
            max_records, view, fields, sort, formula: see :any:`get_iter`.

        Returns:
            records (``list``): List of Records
        """
        all_records = []
        for records in self.get_iter(**options):
            all_records.extend(records)
        return all_records

    def match(self, field_name, field_value, **options):
        """
        Returns first match found in :any:`get_all`

        >>> airtable.match('Name', 'John')
        {'fields': {'Name': 'John'} }

        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.

        Keyword Args:
            max_records, view, fields, sort: see :any:`get_iter`.

        Returns:
            record (``dict``): First record to match the field_value
                provided, or ``{}`` when nothing matches.
        """
        from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
        formula = from_name_and_value(field_name, field_value)
        options['formula'] = formula
        # for/else: the else clause runs only when the loop body never did,
        # i.e. when no record matched.
        for record in self.get_all(**options):
            return record
        else:
            return {}

    def search(self, field_name, field_value, record=None, **options):
        """
        Returns all matching records found in :any:`get_all`

        >>> airtable.search('Gender', 'Male')
        [{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]

        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.
            record: unused; kept only for backward compatibility.

        Keyword Args:
            max_records, view, fields, sort: see :any:`get_iter`.

        Returns:
            records (``list``): All records that matched ``field_value``
        """
        from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
        options['formula'] = from_name_and_value(field_name, field_value)
        return self.get_all(**options)

    def insert(self, fields, typecast=False):
        """
        Inserts a record

        >>> record = {'Name': 'John'}
        >>> airtable.insert(record)

        Args:
            fields(``dict``): Fields to insert.
                Must be dictionary with Column names as Key.
            typecast(``boolean``): Automatic data conversion from string values.

        Returns:
            record (``dict``): Inserted record
        """
        return self._post(self.url_table,
                          json_data={"fields": fields, "typecast": typecast})

    def _batch_request(self, func, iterable):
        """ Internal Function to limit batch calls to API limit """
        responses = []
        for item in iterable:
            responses.append(func(item))
            time.sleep(self.API_LIMIT)
        return responses

    def batch_insert(self, records, typecast=False):
        """
        Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)

        To change the rate limit use ``airtable.API_LIMIT = 0.2``

        >>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
        >>> airtable.batch_insert(records)

        Args:
            records(``list``): Records to insert
            typecast(``boolean``): Automatic data conversion from string values.

        Returns:
            records (``list``): list of added records
        """
        # Bind ``typecast`` so every batched insert honours it; previously
        # the parameter was accepted but silently dropped.
        def insert_one(fields):
            return self.insert(fields, typecast=typecast)
        return self._batch_request(insert_one, records)

    def update(self, record_id, fields, typecast=False):
        """
        Updates a record by its record id.
        Only Fields passed are updated, the rest are left as is.

        >>> record = airtable.match('Employee Id', 'DD13332454')
        >>> airtable.update(record['id'], {'Status': 'Fired'})

        Args:
            record_id(``str``): Id of Record to update
            fields(``dict``): Fields to update.
                Must be dictionary with Column names as Key
            typecast(``boolean``): Automatic data conversion from string values.

        Returns:
            record (``dict``): Updated record
        """
        record_url = self.record_url(record_id)
        return self._patch(record_url,
                           json_data={"fields": fields, "typecast": typecast})

    def update_by_field(self, field_name, field_value, fields,
                        typecast=False, **options):
        """
        Updates the first record to match field name and value.
        Only Fields passed are updated, the rest are left as is.

        >>> airtable.update_by_field('Name', 'John', {'Tel': '540-255-5522'})

        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.
            fields(``dict``): Fields to update.
                Must be dictionary with Column names as Key
            typecast(``boolean``): Automatic data conversion from string values.

        Keyword Args:
            view, sort: see :any:`get_iter`.

        Returns:
            record (``dict``): Updated record, or ``{}`` if no match.
        """
        record = self.match(field_name, field_value, **options)
        return {} if not record else self.update(record['id'], fields, typecast)

    def replace(self, record_id, fields, typecast=False):
        """
        Replaces a record by its record id.
        All Fields are updated to match the new ``fields`` provided.
        If a field is not included in ``fields``, value will be set to null.
        To update only selected fields, use :any:`update`.

        >>> record = airtable.match('Seat Number', '22A')
        >>> airtable.replace(record['id'], {'PassangerName': 'Mike'})

        Args:
            record_id(``str``): Id of Record to update
            fields(``dict``): Fields to replace with.
                Must be dictionary with Column names as Key.
            typecast(``boolean``): Automatic data conversion from string values.

        Returns:
            record (``dict``): New record
        """
        record_url = self.record_url(record_id)
        return self._put(record_url,
                         json_data={"fields": fields, "typecast": typecast})

    def replace_by_field(self, field_name, field_value, fields,
                         typecast=False, **options):
        """
        Replaces the first record to match field name and value.
        All Fields are updated to match the new ``fields`` provided.
        If a field is not included in ``fields``, value will be set to null.
        To update only selected fields, use :any:`update`.

        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.
            fields(``dict``): Fields to replace with.
                Must be dictionary with Column names as Key.
            typecast(``boolean``): Automatic data conversion from string values.

        Keyword Args:
            view, sort: see :any:`get_iter`.

        Returns:
            record (``dict``): New record, or ``{}`` if no match.
        """
        record = self.match(field_name, field_value, **options)
        return {} if not record else self.replace(record['id'], fields, typecast)

    def delete(self, record_id):
        """
        Deletes a record by its id

        >>> record = airtable.match('Employee Id', 'DD13332454')
        >>> airtable.delete(record['id'])

        Args:
            record_id(``str``): Airtable record id

        Returns:
            record (``dict``): Deleted Record
        """
        record_url = self.record_url(record_id)
        return self._delete(record_url)

    def delete_by_field(self, field_name, field_value, **options):
        """
        Deletes first record to match provided ``field_name`` and
        ``field_value``.

        >>> record = airtable.delete_by_field('Employee Id', 'DD13332454')

        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.

        Keyword Args:
            view, sort: see :any:`get_iter`.

        Returns:
            record (``dict``): Deleted Record
        """
        record = self.match(field_name, field_value, **options)
        # NOTE(review): raises KeyError when nothing matches (match()
        # returns {}) — confirm whether callers rely on that before
        # softening it.
        record_url = self.record_url(record['id'])
        return self._delete(record_url)

    def batch_delete(self, record_ids):
        """
        Calls :any:`delete` repetitively, following set API Rate Limit (5/sec)

        To change the rate limit set value of ``airtable.API_LIMIT`` to
        the time in seconds it should sleep before calling the function again.

        >>> airtable.batch_delete(['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F'])

        Args:
            records(``list``): Record Ids to delete

        Returns:
            records(``list``): list of records deleted
        """
        return self._batch_request(self.delete, record_ids)

    def mirror(self, records, **options):
        """
        Deletes all records on table or view and replaces with records.

        >>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
        >>> record = airtable.mirror(records)

        If view options are provided, only records visible on that view will
        be deleted.

        >>> record = airtable.mirror(records, view='View')
        ([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])

        Args:
            records(``list``): Records to insert

        Keyword Args:
            max_records (``int``, optional): Maximum total records returned.
            view (``str``, optional): Name or ID of a view.

        Returns:
            records (``tuple``): (new_records, deleted_records)
        """
        all_record_ids = [r['id'] for r in self.get_all(**options)]
        deleted_records = self.batch_delete(all_record_ids)
        new_records = self.batch_insert(records)
        return (new_records, deleted_records)

    def __repr__(self):
        return '<Airtable table:{}>'.format(self.table_name)
|
gtalarico/airtable-python-wrapper
|
airtable/airtable.py
|
Airtable.search
|
python
|
def search(self, field_name, field_value, record=None, **options):
"""
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
"""
records = []
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
records = self.get_all(**options)
return records
|
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L322-L352
|
[
"def get_all(self, **options):\n \"\"\"\n Retrieves all records repetitively and returns a single list.\n\n >>> airtable.get_all()\n >>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])\n >>> airtable.get_all(maxRecords=50)\n [{'fields': ... }, ...]\n\n Keyword Args:\n max_records (``int``, optional): The maximum total number of\n records that will be returned. See :any:`MaxRecordsParam`\n view (``str``, optional): The name or ID of a view.\n See :any:`ViewParam`.\n fields (``str``, ``list``, optional): Name of field or fields to\n be retrieved. Default is all fields. See :any:`FieldsParam`.\n sort (``list``, optional): List of fields to sort by.\n Default order is ascending. See :any:`SortParam`.\n formula (``str``, optional): Airtable formula.\n See :any:`FormulaParam`.\n\n Returns:\n records (``list``): List of Records\n\n >>> records = get_all(maxRecords=3, view='All')\n\n \"\"\"\n all_records = []\n for records in self.get_iter(**options):\n all_records.extend(records)\n return all_records\n",
"def from_name_and_value(field_name, field_value):\n \"\"\"\n Creates a formula to match cells from from field_name and value\n \"\"\"\n if isinstance(field_value, str):\n field_value = \"'{}'\".format(field_value)\n\n formula = \"{{{name}}}={value}\".format(name=field_name,\n value=field_value)\n return formula\n"
] |
class Airtable():
VERSION = 'v0'
API_BASE_URL = 'https://api.airtable.com/'
API_LIMIT = 1.0 / 5 # 5 per second
API_URL = posixpath.join(API_BASE_URL, VERSION)
def __init__(self, base_key, table_name, api_key=None):
"""
If api_key is not provided, :any:`AirtableAuth` will attempt
to use ``os.environ['AIRTABLE_API_KEY']``
"""
session = requests.Session()
session.auth = AirtableAuth(api_key=api_key)
self.session = session
self.table_name = table_name
url_safe_table_name = quote(table_name, safe='')
self.url_table = posixpath.join(self.API_URL, base_key,
url_safe_table_name)
self.is_authenticated = self.validate_session(self.url_table)
def validate_session(self, url):
response = self.session.get(url, params={'maxRecords': 1})
if response.ok:
return True
elif response.status_code == 404:
raise ValueError('Invalid base or table name: {}'.format(url))
else:
raise ValueError(
'Authentication failed: {}'.format(response.reason))
def _process_params(self, params):
"""
Process params names or values as needed using filters
"""
new_params = OrderedDict()
for param_name, param_value in sorted(params.items()):
param_value = params[param_name]
ParamClass = AirtableParams._get(param_name)
new_params.update(ParamClass(param_value).to_param_dict())
return new_params
def _process_response(self, response):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
err_msg = str(exc)
# Reports Decoded 422 Url for better troubleshooting
# Disabled in IronPython Bug:
# https://github.com/IronLanguages/ironpython2/issues/242
if not IS_IPY and response.status_code == 422:
err_msg = err_msg.replace(response.url, unquote(response.url))
err_msg += (' (Decoded URL)')
# Attempt to get Error message from response, Issue #16
try:
error_dict = response.json()
except json.decoder.JSONDecodeError:
pass
else:
if 'error' in error_dict:
err_msg += ' [Error: {}]'.format(error_dict['error'])
raise requests.exceptions.HTTPError(err_msg)
else:
return response.json()
def record_url(self, record_id):
""" Builds URL with record id """
return posixpath.join(self.url_table, record_id)
def _request(self, method, url, params=None, json_data=None):
response = self.session.request(method, url, params=params,
json=json_data)
return self._process_response(response)
def _get(self, url, **params):
processed_params = self._process_params(params)
return self._request('get', url, params=processed_params)
def _post(self, url, json_data):
return self._request('post', url, json_data=json_data)
def _put(self, url, json_data):
return self._request('put', url, json_data=json_data)
def _patch(self, url, json_data):
return self._request('patch', url, json_data=json_data)
def _delete(self, url):
return self._request('delete', url)
def get(self, record_id):
"""
Retrieves a record by its id
>>> record = airtable.get('recwPQIfs4wKPyc9D')
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Record
"""
record_url = self.record_url(record_id)
return self._get(record_url)
def get_iter(self, **options):
"""
Record Retriever Iterator
Returns iterator with lists in batches according to pageSize.
To get all records at once use :any:`get_all`
>>> for page in airtable.get_iter():
... for record in page:
... print(record)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
page_size (``int``, optional ): The number of records returned
in each request. Must be less than or equal to 100.
Default is 100. See :any:`PageSizeParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
iterator (``list``): List of Records, grouped by pageSize
"""
offset = None
while True:
data = self._get(self.url_table, offset=offset, **options)
records = data.get('records', [])
time.sleep(self.API_LIMIT)
yield records
offset = data.get('offset')
if not offset:
break
def get_all(self, **options):
"""
Retrieves all records repetitively and returns a single list.
>>> airtable.get_all()
>>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
>>> airtable.get_all(maxRecords=50)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
records (``list``): List of Records
>>> records = get_all(maxRecords=3, view='All')
"""
all_records = []
for records in self.get_iter(**options):
all_records.extend(records)
return all_records
def match(self, field_name, field_value, **options):
"""
Returns first match found in :any:`get_all`
>>> airtable.match('Name', 'John')
{'fields': {'Name': 'John'} }
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): First record to match the field_value provided
"""
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
for record in self.get_all(**options):
return record
else:
return {}
def search(self, field_name, field_value, record=None, **options):
"""
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
"""
records = []
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
records = self.get_all(**options)
return records
def insert(self, fields, typecast=False):
"""
Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record
"""
return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast})
def _batch_request(self, func, iterable):
""" Internal Function to limit batch calls to API limit """
responses = []
for item in iterable:
responses.append(func(item))
time.sleep(self.API_LIMIT)
return responses
def batch_insert(self, records, typecast=False):
"""
Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
To change the rate limit use ``airtable.API_LIMIT = 0.2``
(5 per second)
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> airtable.batch_insert(records)
Args:
records(``list``): Records to insert
typecast(``boolean``): Automatic data conversion from string values.
Returns:
records (``list``): list of added records
"""
return self._batch_request(self.insert, records)
def update(self, record_id, fields, typecast=False):
"""
Updates a record by its record id.
Only Fields passed are updated, the rest are left as is.
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> fields = {'Status': 'Fired'}
>>> airtable.update(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Updated record
"""
record_url = self.record_url(record_id)
return self._patch(record_url, json_data={"fields": fields, "typecast": typecast})
def update_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Updates the first record to match field name and value.
Only Fields passed are updated, the rest are left as is.
>>> record = {'Name': 'John', 'Tel': '540-255-5522'}
>>> airtable.update_by_field('Name', 'John', record)
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Updated record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.update(record['id'], fields, typecast)
def replace(self, record_id, fields, typecast=False):
"""
Replaces a record by its record id.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
>>> record = airtable.match('Seat Number', '22A')
>>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}
>>> airtable.replace(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): New record
"""
record_url = self.record_url(record_id)
return self._put(record_url, json_data={"fields": fields, "typecast": typecast})
def replace_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Replaces the first record to match field name and value.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): New record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.replace(record['id'], fields, typecast)
def delete(self, record_id):
"""
Deletes a record by its id
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> airtable.delete(record['id'])
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Deleted Record
"""
record_url = self.record_url(record_id)
return self._delete(record_url)
def delete_by_field(self, field_name, field_value, **options):
"""
Deletes first record to match provided ``field_name`` and
``field_value``.
>>> record = airtable.delete_by_field('Employee Id', 'DD13332454')
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Deleted Record
"""
record = self.match(field_name, field_value, **options)
record_url = self.record_url(record['id'])
return self._delete(record_url)
def batch_delete(self, record_ids):
"""
Calls :any:`delete` repetitively, following set API Rate Limit (5/sec)
To change the rate limit set value of ``airtable.API_LIMIT`` to
the time in seconds it should sleep before calling the function again.
>>> record_ids = ['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F']
>>> airtable.batch_delete(records_ids)
Args:
records(``list``): Record Ids to delete
Returns:
records(``list``): list of records deleted
"""
return self._batch_request(self.delete, record_ids)
def mirror(self, records, **options):
"""
Deletes all records on table or view and replaces with records.
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> record = airtable.,mirror(records)
If view options are provided, only records visible on that view will
be deleted.
>>> record = airtable.mirror(records, view='View')
([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])
Args:
records(``list``): Records to insert
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
Returns:
records (``tuple``): (new_records, deleted_records)
"""
all_record_ids = [r['id'] for r in self.get_all(**options)]
deleted_records = self.batch_delete(all_record_ids)
new_records = self.batch_insert(records)
return (new_records, deleted_records)
def __repr__(self):
return '<Airtable table:{}>'.format(self.table_name)
|
gtalarico/airtable-python-wrapper
|
airtable/airtable.py
|
Airtable.insert
|
python
|
def insert(self, fields, typecast=False):
"""
Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record
"""
return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast})
|
Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L354-L370
|
[
"def _post(self, url, json_data):\n return self._request('post', url, json_data=json_data)\n"
] |
class Airtable():
VERSION = 'v0'
API_BASE_URL = 'https://api.airtable.com/'
API_LIMIT = 1.0 / 5 # 5 per second
API_URL = posixpath.join(API_BASE_URL, VERSION)
def __init__(self, base_key, table_name, api_key=None):
"""
If api_key is not provided, :any:`AirtableAuth` will attempt
to use ``os.environ['AIRTABLE_API_KEY']``
"""
session = requests.Session()
session.auth = AirtableAuth(api_key=api_key)
self.session = session
self.table_name = table_name
url_safe_table_name = quote(table_name, safe='')
self.url_table = posixpath.join(self.API_URL, base_key,
url_safe_table_name)
self.is_authenticated = self.validate_session(self.url_table)
def validate_session(self, url):
response = self.session.get(url, params={'maxRecords': 1})
if response.ok:
return True
elif response.status_code == 404:
raise ValueError('Invalid base or table name: {}'.format(url))
else:
raise ValueError(
'Authentication failed: {}'.format(response.reason))
def _process_params(self, params):
"""
Process params names or values as needed using filters
"""
new_params = OrderedDict()
for param_name, param_value in sorted(params.items()):
param_value = params[param_name]
ParamClass = AirtableParams._get(param_name)
new_params.update(ParamClass(param_value).to_param_dict())
return new_params
def _process_response(self, response):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
err_msg = str(exc)
# Reports Decoded 422 Url for better troubleshooting
# Disabled in IronPython Bug:
# https://github.com/IronLanguages/ironpython2/issues/242
if not IS_IPY and response.status_code == 422:
err_msg = err_msg.replace(response.url, unquote(response.url))
err_msg += (' (Decoded URL)')
# Attempt to get Error message from response, Issue #16
try:
error_dict = response.json()
except json.decoder.JSONDecodeError:
pass
else:
if 'error' in error_dict:
err_msg += ' [Error: {}]'.format(error_dict['error'])
raise requests.exceptions.HTTPError(err_msg)
else:
return response.json()
def record_url(self, record_id):
""" Builds URL with record id """
return posixpath.join(self.url_table, record_id)
def _request(self, method, url, params=None, json_data=None):
response = self.session.request(method, url, params=params,
json=json_data)
return self._process_response(response)
def _get(self, url, **params):
processed_params = self._process_params(params)
return self._request('get', url, params=processed_params)
def _post(self, url, json_data):
return self._request('post', url, json_data=json_data)
def _put(self, url, json_data):
return self._request('put', url, json_data=json_data)
def _patch(self, url, json_data):
return self._request('patch', url, json_data=json_data)
def _delete(self, url):
return self._request('delete', url)
def get(self, record_id):
"""
Retrieves a record by its id
>>> record = airtable.get('recwPQIfs4wKPyc9D')
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Record
"""
record_url = self.record_url(record_id)
return self._get(record_url)
def get_iter(self, **options):
"""
Record Retriever Iterator
Returns iterator with lists in batches according to pageSize.
To get all records at once use :any:`get_all`
>>> for page in airtable.get_iter():
... for record in page:
... print(record)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
page_size (``int``, optional ): The number of records returned
in each request. Must be less than or equal to 100.
Default is 100. See :any:`PageSizeParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
iterator (``list``): List of Records, grouped by pageSize
"""
offset = None
while True:
data = self._get(self.url_table, offset=offset, **options)
records = data.get('records', [])
time.sleep(self.API_LIMIT)
yield records
offset = data.get('offset')
if not offset:
break
def get_all(self, **options):
"""
Retrieves all records repetitively and returns a single list.
>>> airtable.get_all()
>>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
>>> airtable.get_all(maxRecords=50)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
records (``list``): List of Records
>>> records = get_all(maxRecords=3, view='All')
"""
all_records = []
for records in self.get_iter(**options):
all_records.extend(records)
return all_records
def match(self, field_name, field_value, **options):
"""
Returns first match found in :any:`get_all`
>>> airtable.match('Name', 'John')
{'fields': {'Name': 'John'} }
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): First record to match the field_value provided
"""
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
for record in self.get_all(**options):
return record
else:
return {}
def search(self, field_name, field_value, record=None, **options):
"""
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
"""
records = []
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
records = self.get_all(**options)
return records
def insert(self, fields, typecast=False):
"""
Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record
"""
return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast})
def _batch_request(self, func, iterable):
""" Internal Function to limit batch calls to API limit """
responses = []
for item in iterable:
responses.append(func(item))
time.sleep(self.API_LIMIT)
return responses
def batch_insert(self, records, typecast=False):
"""
Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
To change the rate limit use ``airtable.API_LIMIT = 0.2``
(5 per second)
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> airtable.batch_insert(records)
Args:
records(``list``): Records to insert
typecast(``boolean``): Automatic data conversion from string values.
Returns:
records (``list``): list of added records
"""
return self._batch_request(self.insert, records)
def update(self, record_id, fields, typecast=False):
"""
Updates a record by its record id.
Only Fields passed are updated, the rest are left as is.
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> fields = {'Status': 'Fired'}
>>> airtable.update(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Updated record
"""
record_url = self.record_url(record_id)
return self._patch(record_url, json_data={"fields": fields, "typecast": typecast})
def update_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Updates the first record to match field name and value.
Only Fields passed are updated, the rest are left as is.
>>> record = {'Name': 'John', 'Tel': '540-255-5522'}
>>> airtable.update_by_field('Name', 'John', record)
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Updated record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.update(record['id'], fields, typecast)
def replace(self, record_id, fields, typecast=False):
"""
Replaces a record by its record id.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
>>> record = airtable.match('Seat Number', '22A')
>>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}
>>> airtable.replace(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): New record
"""
record_url = self.record_url(record_id)
return self._put(record_url, json_data={"fields": fields, "typecast": typecast})
def replace_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Replaces the first record to match field name and value.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): New record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.replace(record['id'], fields, typecast)
def delete(self, record_id):
"""
Deletes a record by its id
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> airtable.delete(record['id'])
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Deleted Record
"""
record_url = self.record_url(record_id)
return self._delete(record_url)
def delete_by_field(self, field_name, field_value, **options):
"""
Deletes first record to match provided ``field_name`` and
``field_value``.
>>> record = airtable.delete_by_field('Employee Id', 'DD13332454')
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Deleted Record
"""
record = self.match(field_name, field_value, **options)
record_url = self.record_url(record['id'])
return self._delete(record_url)
def batch_delete(self, record_ids):
"""
Calls :any:`delete` repetitively, following set API Rate Limit (5/sec)
To change the rate limit set value of ``airtable.API_LIMIT`` to
the time in seconds it should sleep before calling the function again.
>>> record_ids = ['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F']
>>> airtable.batch_delete(records_ids)
Args:
records(``list``): Record Ids to delete
Returns:
records(``list``): list of records deleted
"""
return self._batch_request(self.delete, record_ids)
def mirror(self, records, **options):
"""
Deletes all records on table or view and replaces with records.
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> record = airtable.,mirror(records)
If view options are provided, only records visible on that view will
be deleted.
>>> record = airtable.mirror(records, view='View')
([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])
Args:
records(``list``): Records to insert
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
Returns:
records (``tuple``): (new_records, deleted_records)
"""
all_record_ids = [r['id'] for r in self.get_all(**options)]
deleted_records = self.batch_delete(all_record_ids)
new_records = self.batch_insert(records)
return (new_records, deleted_records)
def __repr__(self):
return '<Airtable table:{}>'.format(self.table_name)
|
gtalarico/airtable-python-wrapper
|
airtable/airtable.py
|
Airtable._batch_request
|
python
|
def _batch_request(self, func, iterable):
""" Internal Function to limit batch calls to API limit """
responses = []
for item in iterable:
responses.append(func(item))
time.sleep(self.API_LIMIT)
return responses
|
Internal Function to limit batch calls to API limit
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L372-L378
|
[
"def insert(self, fields, typecast=False):\n \"\"\"\n Inserts a record\n\n >>> record = {'Name': 'John'}\n >>> airtable.insert(record)\n\n Args:\n fields(``dict``): Fields to insert.\n Must be dictionary with Column names as Key.\n typecast(``boolean``): Automatic data conversion from string values.\n\n Returns:\n record (``dict``): Inserted record\n\n \"\"\"\n return self._post(self.url_table, json_data={\"fields\": fields, \"typecast\": typecast})\n",
"def delete(self, record_id):\n \"\"\"\n Deletes a record by its id\n\n >>> record = airtable.match('Employee Id', 'DD13332454')\n >>> airtable.delete(record['id'])\n\n Args:\n record_id(``str``): Airtable record id\n\n Returns:\n record (``dict``): Deleted Record\n \"\"\"\n record_url = self.record_url(record_id)\n return self._delete(record_url)\n"
] |
class Airtable():
VERSION = 'v0'
API_BASE_URL = 'https://api.airtable.com/'
API_LIMIT = 1.0 / 5 # 5 per second
API_URL = posixpath.join(API_BASE_URL, VERSION)
def __init__(self, base_key, table_name, api_key=None):
"""
If api_key is not provided, :any:`AirtableAuth` will attempt
to use ``os.environ['AIRTABLE_API_KEY']``
"""
session = requests.Session()
session.auth = AirtableAuth(api_key=api_key)
self.session = session
self.table_name = table_name
url_safe_table_name = quote(table_name, safe='')
self.url_table = posixpath.join(self.API_URL, base_key,
url_safe_table_name)
self.is_authenticated = self.validate_session(self.url_table)
def validate_session(self, url):
response = self.session.get(url, params={'maxRecords': 1})
if response.ok:
return True
elif response.status_code == 404:
raise ValueError('Invalid base or table name: {}'.format(url))
else:
raise ValueError(
'Authentication failed: {}'.format(response.reason))
def _process_params(self, params):
"""
Process params names or values as needed using filters
"""
new_params = OrderedDict()
for param_name, param_value in sorted(params.items()):
param_value = params[param_name]
ParamClass = AirtableParams._get(param_name)
new_params.update(ParamClass(param_value).to_param_dict())
return new_params
def _process_response(self, response):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
err_msg = str(exc)
# Reports Decoded 422 Url for better troubleshooting
# Disabled in IronPython Bug:
# https://github.com/IronLanguages/ironpython2/issues/242
if not IS_IPY and response.status_code == 422:
err_msg = err_msg.replace(response.url, unquote(response.url))
err_msg += (' (Decoded URL)')
# Attempt to get Error message from response, Issue #16
try:
error_dict = response.json()
except json.decoder.JSONDecodeError:
pass
else:
if 'error' in error_dict:
err_msg += ' [Error: {}]'.format(error_dict['error'])
raise requests.exceptions.HTTPError(err_msg)
else:
return response.json()
def record_url(self, record_id):
""" Builds URL with record id """
return posixpath.join(self.url_table, record_id)
def _request(self, method, url, params=None, json_data=None):
response = self.session.request(method, url, params=params,
json=json_data)
return self._process_response(response)
def _get(self, url, **params):
processed_params = self._process_params(params)
return self._request('get', url, params=processed_params)
def _post(self, url, json_data):
return self._request('post', url, json_data=json_data)
def _put(self, url, json_data):
return self._request('put', url, json_data=json_data)
def _patch(self, url, json_data):
return self._request('patch', url, json_data=json_data)
def _delete(self, url):
return self._request('delete', url)
def get(self, record_id):
"""
Retrieves a record by its id
>>> record = airtable.get('recwPQIfs4wKPyc9D')
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Record
"""
record_url = self.record_url(record_id)
return self._get(record_url)
def get_iter(self, **options):
"""
Record Retriever Iterator
Returns iterator with lists in batches according to pageSize.
To get all records at once use :any:`get_all`
>>> for page in airtable.get_iter():
... for record in page:
... print(record)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
page_size (``int``, optional ): The number of records returned
in each request. Must be less than or equal to 100.
Default is 100. See :any:`PageSizeParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
iterator (``list``): List of Records, grouped by pageSize
"""
offset = None
while True:
data = self._get(self.url_table, offset=offset, **options)
records = data.get('records', [])
time.sleep(self.API_LIMIT)
yield records
offset = data.get('offset')
if not offset:
break
def get_all(self, **options):
"""
Retrieves all records repetitively and returns a single list.
>>> airtable.get_all()
>>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
>>> airtable.get_all(maxRecords=50)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
records (``list``): List of Records
>>> records = get_all(maxRecords=3, view='All')
"""
all_records = []
for records in self.get_iter(**options):
all_records.extend(records)
return all_records
def match(self, field_name, field_value, **options):
"""
Returns first match found in :any:`get_all`
>>> airtable.match('Name', 'John')
{'fields': {'Name': 'John'} }
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): First record to match the field_value provided
"""
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
for record in self.get_all(**options):
return record
else:
return {}
def search(self, field_name, field_value, record=None, **options):
"""
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
"""
records = []
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
records = self.get_all(**options)
return records
def insert(self, fields, typecast=False):
"""
Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record
"""
return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast})
def _batch_request(self, func, iterable):
""" Internal Function to limit batch calls to API limit """
responses = []
for item in iterable:
responses.append(func(item))
time.sleep(self.API_LIMIT)
return responses
def batch_insert(self, records, typecast=False):
"""
Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
To change the rate limit use ``airtable.API_LIMIT = 0.2``
(5 per second)
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> airtable.batch_insert(records)
Args:
records(``list``): Records to insert
typecast(``boolean``): Automatic data conversion from string values.
Returns:
records (``list``): list of added records
"""
return self._batch_request(self.insert, records)
def update(self, record_id, fields, typecast=False):
"""
Updates a record by its record id.
Only Fields passed are updated, the rest are left as is.
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> fields = {'Status': 'Fired'}
>>> airtable.update(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Updated record
"""
record_url = self.record_url(record_id)
return self._patch(record_url, json_data={"fields": fields, "typecast": typecast})
def update_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Updates the first record to match field name and value.
Only Fields passed are updated, the rest are left as is.
>>> record = {'Name': 'John', 'Tel': '540-255-5522'}
>>> airtable.update_by_field('Name', 'John', record)
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Updated record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.update(record['id'], fields, typecast)
def replace(self, record_id, fields, typecast=False):
"""
Replaces a record by its record id.
All Fields are updated to match the new ``fields`` provided.
        If a field is not included in ``fields``, value will be set to null.
To update only selected fields, use :any:`update`.
>>> record = airtable.match('Seat Number', '22A')
>>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}
>>> airtable.replace(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): New record
"""
record_url = self.record_url(record_id)
return self._put(record_url, json_data={"fields": fields, "typecast": typecast})
def replace_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Replaces the first record to match field name and value.
All Fields are updated to match the new ``fields`` provided.
        If a field is not included in ``fields``, value will be set to null.
To update only selected fields, use :any:`update`.
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): New record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.replace(record['id'], fields, typecast)
def delete(self, record_id):
"""
Deletes a record by its id
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> airtable.delete(record['id'])
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Deleted Record
"""
record_url = self.record_url(record_id)
return self._delete(record_url)
def delete_by_field(self, field_name, field_value, **options):
"""
Deletes first record to match provided ``field_name`` and
``field_value``.
>>> record = airtable.delete_by_field('Employee Id', 'DD13332454')
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Deleted Record
"""
record = self.match(field_name, field_value, **options)
record_url = self.record_url(record['id'])
return self._delete(record_url)
def batch_delete(self, record_ids):
"""
Calls :any:`delete` repetitively, following set API Rate Limit (5/sec)
To change the rate limit set value of ``airtable.API_LIMIT`` to
the time in seconds it should sleep before calling the function again.
>>> record_ids = ['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F']
        >>> airtable.batch_delete(record_ids)
Args:
            record_ids(``list``): Record Ids to delete
Returns:
records(``list``): list of records deleted
"""
return self._batch_request(self.delete, record_ids)
def mirror(self, records, **options):
"""
Deletes all records on table or view and replaces with records.
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
        >>> record = airtable.mirror(records)
If view options are provided, only records visible on that view will
be deleted.
>>> record = airtable.mirror(records, view='View')
([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])
Args:
records(``list``): Records to insert
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
Returns:
records (``tuple``): (new_records, deleted_records)
"""
all_record_ids = [r['id'] for r in self.get_all(**options)]
deleted_records = self.batch_delete(all_record_ids)
new_records = self.batch_insert(records)
return (new_records, deleted_records)
def __repr__(self):
return '<Airtable table:{}>'.format(self.table_name)
|
gtalarico/airtable-python-wrapper
|
airtable/airtable.py
|
Airtable.batch_insert
|
python
|
def batch_insert(self, records, typecast=False):
"""
Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
To change the rate limit use ``airtable.API_LIMIT = 0.2``
(5 per second)
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> airtable.batch_insert(records)
Args:
records(``list``): Records to insert
typecast(``boolean``): Automatic data conversion from string values.
Returns:
records (``list``): list of added records
"""
return self._batch_request(self.insert, records)
|
Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
To change the rate limit use ``airtable.API_LIMIT = 0.2``
(5 per second)
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> airtable.batch_insert(records)
Args:
records(``list``): Records to insert
typecast(``boolean``): Automatic data conversion from string values.
Returns:
records (``list``): list of added records
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L380-L397
|
[
"def _batch_request(self, func, iterable):\n \"\"\" Internal Function to limit batch calls to API limit \"\"\"\n responses = []\n for item in iterable:\n responses.append(func(item))\n time.sleep(self.API_LIMIT)\n return responses\n"
] |
class Airtable():
VERSION = 'v0'
API_BASE_URL = 'https://api.airtable.com/'
API_LIMIT = 1.0 / 5 # 5 per second
API_URL = posixpath.join(API_BASE_URL, VERSION)
def __init__(self, base_key, table_name, api_key=None):
"""
If api_key is not provided, :any:`AirtableAuth` will attempt
to use ``os.environ['AIRTABLE_API_KEY']``
"""
session = requests.Session()
session.auth = AirtableAuth(api_key=api_key)
self.session = session
self.table_name = table_name
url_safe_table_name = quote(table_name, safe='')
self.url_table = posixpath.join(self.API_URL, base_key,
url_safe_table_name)
self.is_authenticated = self.validate_session(self.url_table)
def validate_session(self, url):
response = self.session.get(url, params={'maxRecords': 1})
if response.ok:
return True
elif response.status_code == 404:
raise ValueError('Invalid base or table name: {}'.format(url))
else:
raise ValueError(
'Authentication failed: {}'.format(response.reason))
def _process_params(self, params):
"""
Process params names or values as needed using filters
"""
new_params = OrderedDict()
for param_name, param_value in sorted(params.items()):
param_value = params[param_name]
ParamClass = AirtableParams._get(param_name)
new_params.update(ParamClass(param_value).to_param_dict())
return new_params
def _process_response(self, response):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
err_msg = str(exc)
# Reports Decoded 422 Url for better troubleshooting
# Disabled in IronPython Bug:
# https://github.com/IronLanguages/ironpython2/issues/242
if not IS_IPY and response.status_code == 422:
err_msg = err_msg.replace(response.url, unquote(response.url))
err_msg += (' (Decoded URL)')
# Attempt to get Error message from response, Issue #16
try:
error_dict = response.json()
except json.decoder.JSONDecodeError:
pass
else:
if 'error' in error_dict:
err_msg += ' [Error: {}]'.format(error_dict['error'])
raise requests.exceptions.HTTPError(err_msg)
else:
return response.json()
def record_url(self, record_id):
""" Builds URL with record id """
return posixpath.join(self.url_table, record_id)
def _request(self, method, url, params=None, json_data=None):
response = self.session.request(method, url, params=params,
json=json_data)
return self._process_response(response)
def _get(self, url, **params):
processed_params = self._process_params(params)
return self._request('get', url, params=processed_params)
def _post(self, url, json_data):
return self._request('post', url, json_data=json_data)
def _put(self, url, json_data):
return self._request('put', url, json_data=json_data)
def _patch(self, url, json_data):
return self._request('patch', url, json_data=json_data)
def _delete(self, url):
return self._request('delete', url)
def get(self, record_id):
"""
Retrieves a record by its id
>>> record = airtable.get('recwPQIfs4wKPyc9D')
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Record
"""
record_url = self.record_url(record_id)
return self._get(record_url)
def get_iter(self, **options):
"""
Record Retriever Iterator
Returns iterator with lists in batches according to pageSize.
To get all records at once use :any:`get_all`
>>> for page in airtable.get_iter():
... for record in page:
... print(record)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
page_size (``int``, optional ): The number of records returned
in each request. Must be less than or equal to 100.
Default is 100. See :any:`PageSizeParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
iterator (``list``): List of Records, grouped by pageSize
"""
offset = None
while True:
data = self._get(self.url_table, offset=offset, **options)
records = data.get('records', [])
time.sleep(self.API_LIMIT)
yield records
offset = data.get('offset')
if not offset:
break
def get_all(self, **options):
"""
Retrieves all records repetitively and returns a single list.
>>> airtable.get_all()
>>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
>>> airtable.get_all(maxRecords=50)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
records (``list``): List of Records
>>> records = get_all(maxRecords=3, view='All')
"""
all_records = []
for records in self.get_iter(**options):
all_records.extend(records)
return all_records
def match(self, field_name, field_value, **options):
"""
Returns first match found in :any:`get_all`
>>> airtable.match('Name', 'John')
{'fields': {'Name': 'John'} }
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): First record to match the field_value provided
"""
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
for record in self.get_all(**options):
return record
else:
return {}
def search(self, field_name, field_value, record=None, **options):
"""
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
"""
records = []
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
records = self.get_all(**options)
return records
def insert(self, fields, typecast=False):
"""
Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record
"""
return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast})
def _batch_request(self, func, iterable):
""" Internal Function to limit batch calls to API limit """
responses = []
for item in iterable:
responses.append(func(item))
time.sleep(self.API_LIMIT)
return responses
def batch_insert(self, records, typecast=False):
"""
Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
To change the rate limit use ``airtable.API_LIMIT = 0.2``
(5 per second)
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> airtable.batch_insert(records)
Args:
records(``list``): Records to insert
typecast(``boolean``): Automatic data conversion from string values.
Returns:
records (``list``): list of added records
"""
return self._batch_request(self.insert, records)
def update(self, record_id, fields, typecast=False):
"""
Updates a record by its record id.
Only Fields passed are updated, the rest are left as is.
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> fields = {'Status': 'Fired'}
>>> airtable.update(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Updated record
"""
record_url = self.record_url(record_id)
return self._patch(record_url, json_data={"fields": fields, "typecast": typecast})
def update_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Updates the first record to match field name and value.
Only Fields passed are updated, the rest are left as is.
>>> record = {'Name': 'John', 'Tel': '540-255-5522'}
>>> airtable.update_by_field('Name', 'John', record)
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Updated record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.update(record['id'], fields, typecast)
def replace(self, record_id, fields, typecast=False):
"""
Replaces a record by its record id.
All Fields are updated to match the new ``fields`` provided.
        If a field is not included in ``fields``, value will be set to null.
To update only selected fields, use :any:`update`.
>>> record = airtable.match('Seat Number', '22A')
>>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}
>>> airtable.replace(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): New record
"""
record_url = self.record_url(record_id)
return self._put(record_url, json_data={"fields": fields, "typecast": typecast})
def replace_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Replaces the first record to match field name and value.
All Fields are updated to match the new ``fields`` provided.
        If a field is not included in ``fields``, value will be set to null.
To update only selected fields, use :any:`update`.
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): New record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.replace(record['id'], fields, typecast)
def delete(self, record_id):
"""
Deletes a record by its id
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> airtable.delete(record['id'])
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Deleted Record
"""
record_url = self.record_url(record_id)
return self._delete(record_url)
def delete_by_field(self, field_name, field_value, **options):
"""
Deletes first record to match provided ``field_name`` and
``field_value``.
>>> record = airtable.delete_by_field('Employee Id', 'DD13332454')
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Deleted Record
"""
record = self.match(field_name, field_value, **options)
record_url = self.record_url(record['id'])
return self._delete(record_url)
def batch_delete(self, record_ids):
"""
Calls :any:`delete` repetitively, following set API Rate Limit (5/sec)
To change the rate limit set value of ``airtable.API_LIMIT`` to
the time in seconds it should sleep before calling the function again.
>>> record_ids = ['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F']
        >>> airtable.batch_delete(record_ids)
Args:
            record_ids(``list``): Record Ids to delete
Returns:
records(``list``): list of records deleted
"""
return self._batch_request(self.delete, record_ids)
def mirror(self, records, **options):
"""
Deletes all records on table or view and replaces with records.
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
        >>> record = airtable.mirror(records)
If view options are provided, only records visible on that view will
be deleted.
>>> record = airtable.mirror(records, view='View')
([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])
Args:
records(``list``): Records to insert
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
Returns:
records (``tuple``): (new_records, deleted_records)
"""
all_record_ids = [r['id'] for r in self.get_all(**options)]
deleted_records = self.batch_delete(all_record_ids)
new_records = self.batch_insert(records)
return (new_records, deleted_records)
def __repr__(self):
return '<Airtable table:{}>'.format(self.table_name)
|
gtalarico/airtable-python-wrapper
|
airtable/airtable.py
|
Airtable.update
|
python
|
def update(self, record_id, fields, typecast=False):
"""
Updates a record by its record id.
Only Fields passed are updated, the rest are left as is.
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> fields = {'Status': 'Fired'}
>>> airtable.update(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Updated record
"""
record_url = self.record_url(record_id)
return self._patch(record_url, json_data={"fields": fields, "typecast": typecast})
|
Updates a record by its record id.
Only Fields passed are updated, the rest are left as is.
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> fields = {'Status': 'Fired'}
>>> airtable.update(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Updated record
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L399-L418
|
[
"def record_url(self, record_id):\n \"\"\" Builds URL with record id \"\"\"\n return posixpath.join(self.url_table, record_id)\n",
"def _patch(self, url, json_data):\n return self._request('patch', url, json_data=json_data)\n"
] |
class Airtable():
VERSION = 'v0'
API_BASE_URL = 'https://api.airtable.com/'
API_LIMIT = 1.0 / 5 # 5 per second
API_URL = posixpath.join(API_BASE_URL, VERSION)
def __init__(self, base_key, table_name, api_key=None):
"""
If api_key is not provided, :any:`AirtableAuth` will attempt
to use ``os.environ['AIRTABLE_API_KEY']``
"""
session = requests.Session()
session.auth = AirtableAuth(api_key=api_key)
self.session = session
self.table_name = table_name
url_safe_table_name = quote(table_name, safe='')
self.url_table = posixpath.join(self.API_URL, base_key,
url_safe_table_name)
self.is_authenticated = self.validate_session(self.url_table)
def validate_session(self, url):
response = self.session.get(url, params={'maxRecords': 1})
if response.ok:
return True
elif response.status_code == 404:
raise ValueError('Invalid base or table name: {}'.format(url))
else:
raise ValueError(
'Authentication failed: {}'.format(response.reason))
def _process_params(self, params):
"""
Process params names or values as needed using filters
"""
new_params = OrderedDict()
for param_name, param_value in sorted(params.items()):
param_value = params[param_name]
ParamClass = AirtableParams._get(param_name)
new_params.update(ParamClass(param_value).to_param_dict())
return new_params
def _process_response(self, response):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
err_msg = str(exc)
# Reports Decoded 422 Url for better troubleshooting
# Disabled in IronPython Bug:
# https://github.com/IronLanguages/ironpython2/issues/242
if not IS_IPY and response.status_code == 422:
err_msg = err_msg.replace(response.url, unquote(response.url))
err_msg += (' (Decoded URL)')
# Attempt to get Error message from response, Issue #16
try:
error_dict = response.json()
except json.decoder.JSONDecodeError:
pass
else:
if 'error' in error_dict:
err_msg += ' [Error: {}]'.format(error_dict['error'])
raise requests.exceptions.HTTPError(err_msg)
else:
return response.json()
def record_url(self, record_id):
""" Builds URL with record id """
return posixpath.join(self.url_table, record_id)
def _request(self, method, url, params=None, json_data=None):
response = self.session.request(method, url, params=params,
json=json_data)
return self._process_response(response)
def _get(self, url, **params):
processed_params = self._process_params(params)
return self._request('get', url, params=processed_params)
def _post(self, url, json_data):
return self._request('post', url, json_data=json_data)
def _put(self, url, json_data):
return self._request('put', url, json_data=json_data)
def _patch(self, url, json_data):
return self._request('patch', url, json_data=json_data)
def _delete(self, url):
return self._request('delete', url)
def get(self, record_id):
"""
Retrieves a record by its id
>>> record = airtable.get('recwPQIfs4wKPyc9D')
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Record
"""
record_url = self.record_url(record_id)
return self._get(record_url)
def get_iter(self, **options):
"""
Record Retriever Iterator
Returns iterator with lists in batches according to pageSize.
To get all records at once use :any:`get_all`
>>> for page in airtable.get_iter():
... for record in page:
... print(record)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
page_size (``int``, optional ): The number of records returned
in each request. Must be less than or equal to 100.
Default is 100. See :any:`PageSizeParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
iterator (``list``): List of Records, grouped by pageSize
"""
offset = None
while True:
data = self._get(self.url_table, offset=offset, **options)
records = data.get('records', [])
time.sleep(self.API_LIMIT)
yield records
offset = data.get('offset')
if not offset:
break
def get_all(self, **options):
"""
Retrieves all records repetitively and returns a single list.
>>> airtable.get_all()
>>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
>>> airtable.get_all(maxRecords=50)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
records (``list``): List of Records
>>> records = get_all(maxRecords=3, view='All')
"""
all_records = []
for records in self.get_iter(**options):
all_records.extend(records)
return all_records
def match(self, field_name, field_value, **options):
"""
Returns first match found in :any:`get_all`
>>> airtable.match('Name', 'John')
{'fields': {'Name': 'John'} }
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): First record to match the field_value provided
"""
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
for record in self.get_all(**options):
return record
else:
return {}
def search(self, field_name, field_value, record=None, **options):
"""
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
"""
records = []
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
records = self.get_all(**options)
return records
def insert(self, fields, typecast=False):
"""
Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record
"""
return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast})
def _batch_request(self, func, iterable):
""" Internal Function to limit batch calls to API limit """
responses = []
for item in iterable:
responses.append(func(item))
time.sleep(self.API_LIMIT)
return responses
def batch_insert(self, records, typecast=False):
"""
Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
To change the rate limit use ``airtable.API_LIMIT = 0.2``
(5 per second)
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> airtable.batch_insert(records)
Args:
records(``list``): Records to insert
typecast(``boolean``): Automatic data conversion from string values.
Returns:
records (``list``): list of added records
"""
return self._batch_request(self.insert, records)
def update(self, record_id, fields, typecast=False):
"""
Updates a record by its record id.
Only Fields passed are updated, the rest are left as is.
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> fields = {'Status': 'Fired'}
>>> airtable.update(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Updated record
"""
record_url = self.record_url(record_id)
return self._patch(record_url, json_data={"fields": fields, "typecast": typecast})
def update_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Updates the first record to match field name and value.
Only Fields passed are updated, the rest are left as is.
>>> record = {'Name': 'John', 'Tel': '540-255-5522'}
>>> airtable.update_by_field('Name', 'John', record)
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Updated record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.update(record['id'], fields, typecast)
def replace(self, record_id, fields, typecast=False):
"""
Replaces a record by its record id.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
>>> record = airtable.match('Seat Number', '22A')
>>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}
>>> airtable.replace(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): New record
"""
record_url = self.record_url(record_id)
return self._put(record_url, json_data={"fields": fields, "typecast": typecast})
def replace_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Replaces the first record to match field name and value.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): New record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.replace(record['id'], fields, typecast)
def delete(self, record_id):
"""
Deletes a record by its id
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> airtable.delete(record['id'])
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Deleted Record
"""
record_url = self.record_url(record_id)
return self._delete(record_url)
def delete_by_field(self, field_name, field_value, **options):
"""
Deletes first record to match provided ``field_name`` and
``field_value``.
>>> record = airtable.delete_by_field('Employee Id', 'DD13332454')
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Deleted Record
"""
record = self.match(field_name, field_value, **options)
record_url = self.record_url(record['id'])
return self._delete(record_url)
def batch_delete(self, record_ids):
"""
Calls :any:`delete` repetitively, following set API Rate Limit (5/sec)
To change the rate limit set value of ``airtable.API_LIMIT`` to
the time in seconds it should sleep before calling the function again.
>>> record_ids = ['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F']
>>> airtable.batch_delete(records_ids)
Args:
records(``list``): Record Ids to delete
Returns:
records(``list``): list of records deleted
"""
return self._batch_request(self.delete, record_ids)
def mirror(self, records, **options):
"""
Deletes all records on table or view and replaces with records.
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> record = airtable.,mirror(records)
If view options are provided, only records visible on that view will
be deleted.
>>> record = airtable.mirror(records, view='View')
([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])
Args:
records(``list``): Records to insert
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
Returns:
records (``tuple``): (new_records, deleted_records)
"""
all_record_ids = [r['id'] for r in self.get_all(**options)]
deleted_records = self.batch_delete(all_record_ids)
new_records = self.batch_insert(records)
return (new_records, deleted_records)
def __repr__(self):
return '<Airtable table:{}>'.format(self.table_name)
|
gtalarico/airtable-python-wrapper
|
airtable/airtable.py
|
Airtable.update_by_field
|
python
|
def update_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Updates the first record to match field name and value.
Only Fields passed are updated, the rest are left as is.
>>> record = {'Name': 'John', 'Tel': '540-255-5522'}
>>> airtable.update_by_field('Name', 'John', record)
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Updated record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.update(record['id'], fields, typecast)
|
Updates the first record to match field name and value.
Only Fields passed are updated, the rest are left as is.
>>> record = {'Name': 'John', 'Tel': '540-255-5522'}
>>> airtable.update_by_field('Name', 'John', record)
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Updated record
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L420-L445
|
[
"def match(self, field_name, field_value, **options):\n \"\"\"\n Returns first match found in :any:`get_all`\n\n >>> airtable.match('Name', 'John')\n {'fields': {'Name': 'John'} }\n\n Args:\n field_name (``str``): Name of field to match (column name).\n field_value (``str``): Value of field to match.\n\n Keyword Args:\n max_records (``int``, optional): The maximum total number of\n records that will be returned. See :any:`MaxRecordsParam`\n view (``str``, optional): The name or ID of a view.\n See :any:`ViewParam`.\n fields (``str``, ``list``, optional): Name of field or fields to\n be retrieved. Default is all fields. See :any:`FieldsParam`.\n sort (``list``, optional): List of fields to sort by.\n Default order is ascending. See :any:`SortParam`.\n\n Returns:\n record (``dict``): First record to match the field_value provided\n \"\"\"\n from_name_and_value = AirtableParams.FormulaParam.from_name_and_value\n formula = from_name_and_value(field_name, field_value)\n options['formula'] = formula\n for record in self.get_all(**options):\n return record\n",
"def update(self, record_id, fields, typecast=False):\n \"\"\"\n Updates a record by its record id.\n Only Fields passed are updated, the rest are left as is.\n\n >>> record = airtable.match('Employee Id', 'DD13332454')\n >>> fields = {'Status': 'Fired'}\n >>> airtable.update(record['id'], fields)\n\n Args:\n record_id(``str``): Id of Record to update\n fields(``dict``): Fields to update.\n Must be dictionary with Column names as Key\n typecast(``boolean``): Automatic data conversion from string values.\n\n Returns:\n record (``dict``): Updated record\n \"\"\"\n record_url = self.record_url(record_id)\n return self._patch(record_url, json_data={\"fields\": fields, \"typecast\": typecast})\n"
] |
class Airtable():
VERSION = 'v0'
API_BASE_URL = 'https://api.airtable.com/'
API_LIMIT = 1.0 / 5 # 5 per second
API_URL = posixpath.join(API_BASE_URL, VERSION)
def __init__(self, base_key, table_name, api_key=None):
"""
If api_key is not provided, :any:`AirtableAuth` will attempt
to use ``os.environ['AIRTABLE_API_KEY']``
"""
session = requests.Session()
session.auth = AirtableAuth(api_key=api_key)
self.session = session
self.table_name = table_name
url_safe_table_name = quote(table_name, safe='')
self.url_table = posixpath.join(self.API_URL, base_key,
url_safe_table_name)
self.is_authenticated = self.validate_session(self.url_table)
def validate_session(self, url):
response = self.session.get(url, params={'maxRecords': 1})
if response.ok:
return True
elif response.status_code == 404:
raise ValueError('Invalid base or table name: {}'.format(url))
else:
raise ValueError(
'Authentication failed: {}'.format(response.reason))
def _process_params(self, params):
"""
Process params names or values as needed using filters
"""
new_params = OrderedDict()
for param_name, param_value in sorted(params.items()):
param_value = params[param_name]
ParamClass = AirtableParams._get(param_name)
new_params.update(ParamClass(param_value).to_param_dict())
return new_params
def _process_response(self, response):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
err_msg = str(exc)
# Reports Decoded 422 Url for better troubleshooting
# Disabled in IronPython Bug:
# https://github.com/IronLanguages/ironpython2/issues/242
if not IS_IPY and response.status_code == 422:
err_msg = err_msg.replace(response.url, unquote(response.url))
err_msg += (' (Decoded URL)')
# Attempt to get Error message from response, Issue #16
try:
error_dict = response.json()
except json.decoder.JSONDecodeError:
pass
else:
if 'error' in error_dict:
err_msg += ' [Error: {}]'.format(error_dict['error'])
raise requests.exceptions.HTTPError(err_msg)
else:
return response.json()
def record_url(self, record_id):
""" Builds URL with record id """
return posixpath.join(self.url_table, record_id)
def _request(self, method, url, params=None, json_data=None):
response = self.session.request(method, url, params=params,
json=json_data)
return self._process_response(response)
def _get(self, url, **params):
processed_params = self._process_params(params)
return self._request('get', url, params=processed_params)
def _post(self, url, json_data):
return self._request('post', url, json_data=json_data)
def _put(self, url, json_data):
return self._request('put', url, json_data=json_data)
def _patch(self, url, json_data):
return self._request('patch', url, json_data=json_data)
def _delete(self, url):
return self._request('delete', url)
def get(self, record_id):
"""
Retrieves a record by its id
>>> record = airtable.get('recwPQIfs4wKPyc9D')
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Record
"""
record_url = self.record_url(record_id)
return self._get(record_url)
def get_iter(self, **options):
"""
Record Retriever Iterator
Returns iterator with lists in batches according to pageSize.
To get all records at once use :any:`get_all`
>>> for page in airtable.get_iter():
... for record in page:
... print(record)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
page_size (``int``, optional ): The number of records returned
in each request. Must be less than or equal to 100.
Default is 100. See :any:`PageSizeParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
iterator (``list``): List of Records, grouped by pageSize
"""
offset = None
while True:
data = self._get(self.url_table, offset=offset, **options)
records = data.get('records', [])
time.sleep(self.API_LIMIT)
yield records
offset = data.get('offset')
if not offset:
break
def get_all(self, **options):
"""
Retrieves all records repetitively and returns a single list.
>>> airtable.get_all()
>>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
>>> airtable.get_all(maxRecords=50)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
records (``list``): List of Records
>>> records = get_all(maxRecords=3, view='All')
"""
all_records = []
for records in self.get_iter(**options):
all_records.extend(records)
return all_records
def match(self, field_name, field_value, **options):
"""
Returns first match found in :any:`get_all`
>>> airtable.match('Name', 'John')
{'fields': {'Name': 'John'} }
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): First record to match the field_value provided
"""
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
for record in self.get_all(**options):
return record
else:
return {}
def search(self, field_name, field_value, record=None, **options):
"""
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
"""
records = []
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
records = self.get_all(**options)
return records
def insert(self, fields, typecast=False):
"""
Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record
"""
return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast})
def _batch_request(self, func, iterable):
""" Internal Function to limit batch calls to API limit """
responses = []
for item in iterable:
responses.append(func(item))
time.sleep(self.API_LIMIT)
return responses
def batch_insert(self, records, typecast=False):
"""
Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
To change the rate limit use ``airtable.API_LIMIT = 0.2``
(5 per second)
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> airtable.batch_insert(records)
Args:
records(``list``): Records to insert
typecast(``boolean``): Automatic data conversion from string values.
Returns:
records (``list``): list of added records
"""
return self._batch_request(self.insert, records)
def update(self, record_id, fields, typecast=False):
"""
Updates a record by its record id.
Only Fields passed are updated, the rest are left as is.
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> fields = {'Status': 'Fired'}
>>> airtable.update(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Updated record
"""
record_url = self.record_url(record_id)
return self._patch(record_url, json_data={"fields": fields, "typecast": typecast})
def update_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Updates the first record to match field name and value.
Only Fields passed are updated, the rest are left as is.
>>> record = {'Name': 'John', 'Tel': '540-255-5522'}
>>> airtable.update_by_field('Name', 'John', record)
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Updated record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.update(record['id'], fields, typecast)
def replace(self, record_id, fields, typecast=False):
"""
Replaces a record by its record id.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
>>> record = airtable.match('Seat Number', '22A')
>>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}
>>> airtable.replace(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): New record
"""
record_url = self.record_url(record_id)
return self._put(record_url, json_data={"fields": fields, "typecast": typecast})
def replace_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Replaces the first record to match field name and value.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): New record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.replace(record['id'], fields, typecast)
def delete(self, record_id):
"""
Deletes a record by its id
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> airtable.delete(record['id'])
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Deleted Record
"""
record_url = self.record_url(record_id)
return self._delete(record_url)
def delete_by_field(self, field_name, field_value, **options):
"""
Deletes first record to match provided ``field_name`` and
``field_value``.
>>> record = airtable.delete_by_field('Employee Id', 'DD13332454')
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Deleted Record
"""
record = self.match(field_name, field_value, **options)
record_url = self.record_url(record['id'])
return self._delete(record_url)
def batch_delete(self, record_ids):
"""
Calls :any:`delete` repetitively, following set API Rate Limit (5/sec)
To change the rate limit set value of ``airtable.API_LIMIT`` to
the time in seconds it should sleep before calling the function again.
>>> record_ids = ['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F']
>>> airtable.batch_delete(records_ids)
Args:
records(``list``): Record Ids to delete
Returns:
records(``list``): list of records deleted
"""
return self._batch_request(self.delete, record_ids)
def mirror(self, records, **options):
"""
Deletes all records on table or view and replaces with records.
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> record = airtable.,mirror(records)
If view options are provided, only records visible on that view will
be deleted.
>>> record = airtable.mirror(records, view='View')
([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])
Args:
records(``list``): Records to insert
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
Returns:
records (``tuple``): (new_records, deleted_records)
"""
all_record_ids = [r['id'] for r in self.get_all(**options)]
deleted_records = self.batch_delete(all_record_ids)
new_records = self.batch_insert(records)
return (new_records, deleted_records)
def __repr__(self):
return '<Airtable table:{}>'.format(self.table_name)
|
gtalarico/airtable-python-wrapper
|
airtable/airtable.py
|
Airtable.replace
|
python
|
def replace(self, record_id, fields, typecast=False):
"""
Replaces a record by its record id.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
>>> record = airtable.match('Seat Number', '22A')
>>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}
>>> airtable.replace(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): New record
"""
record_url = self.record_url(record_id)
return self._put(record_url, json_data={"fields": fields, "typecast": typecast})
|
Replaces a record by its record id.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
>>> record = airtable.match('Seat Number', '22A')
>>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}
>>> airtable.replace(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): New record
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L447-L468
|
[
"def record_url(self, record_id):\n \"\"\" Builds URL with record id \"\"\"\n return posixpath.join(self.url_table, record_id)\n",
"def _put(self, url, json_data):\n return self._request('put', url, json_data=json_data)\n"
] |
class Airtable():
VERSION = 'v0'
API_BASE_URL = 'https://api.airtable.com/'
API_LIMIT = 1.0 / 5 # 5 per second
API_URL = posixpath.join(API_BASE_URL, VERSION)
def __init__(self, base_key, table_name, api_key=None):
"""
If api_key is not provided, :any:`AirtableAuth` will attempt
to use ``os.environ['AIRTABLE_API_KEY']``
"""
session = requests.Session()
session.auth = AirtableAuth(api_key=api_key)
self.session = session
self.table_name = table_name
url_safe_table_name = quote(table_name, safe='')
self.url_table = posixpath.join(self.API_URL, base_key,
url_safe_table_name)
self.is_authenticated = self.validate_session(self.url_table)
def validate_session(self, url):
response = self.session.get(url, params={'maxRecords': 1})
if response.ok:
return True
elif response.status_code == 404:
raise ValueError('Invalid base or table name: {}'.format(url))
else:
raise ValueError(
'Authentication failed: {}'.format(response.reason))
def _process_params(self, params):
"""
Process params names or values as needed using filters
"""
new_params = OrderedDict()
for param_name, param_value in sorted(params.items()):
param_value = params[param_name]
ParamClass = AirtableParams._get(param_name)
new_params.update(ParamClass(param_value).to_param_dict())
return new_params
def _process_response(self, response):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
err_msg = str(exc)
# Reports Decoded 422 Url for better troubleshooting
# Disabled in IronPython Bug:
# https://github.com/IronLanguages/ironpython2/issues/242
if not IS_IPY and response.status_code == 422:
err_msg = err_msg.replace(response.url, unquote(response.url))
err_msg += (' (Decoded URL)')
# Attempt to get Error message from response, Issue #16
try:
error_dict = response.json()
except json.decoder.JSONDecodeError:
pass
else:
if 'error' in error_dict:
err_msg += ' [Error: {}]'.format(error_dict['error'])
raise requests.exceptions.HTTPError(err_msg)
else:
return response.json()
def record_url(self, record_id):
""" Builds URL with record id """
return posixpath.join(self.url_table, record_id)
def _request(self, method, url, params=None, json_data=None):
response = self.session.request(method, url, params=params,
json=json_data)
return self._process_response(response)
def _get(self, url, **params):
processed_params = self._process_params(params)
return self._request('get', url, params=processed_params)
def _post(self, url, json_data):
return self._request('post', url, json_data=json_data)
def _put(self, url, json_data):
return self._request('put', url, json_data=json_data)
def _patch(self, url, json_data):
return self._request('patch', url, json_data=json_data)
def _delete(self, url):
return self._request('delete', url)
def get(self, record_id):
"""
Retrieves a record by its id
>>> record = airtable.get('recwPQIfs4wKPyc9D')
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Record
"""
record_url = self.record_url(record_id)
return self._get(record_url)
def get_iter(self, **options):
"""
Record Retriever Iterator
Returns iterator with lists in batches according to pageSize.
To get all records at once use :any:`get_all`
>>> for page in airtable.get_iter():
... for record in page:
... print(record)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
page_size (``int``, optional ): The number of records returned
in each request. Must be less than or equal to 100.
Default is 100. See :any:`PageSizeParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
iterator (``list``): List of Records, grouped by pageSize
"""
offset = None
while True:
data = self._get(self.url_table, offset=offset, **options)
records = data.get('records', [])
time.sleep(self.API_LIMIT)
yield records
offset = data.get('offset')
if not offset:
break
def get_all(self, **options):
"""
Retrieves all records repetitively and returns a single list.
>>> airtable.get_all()
>>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
>>> airtable.get_all(maxRecords=50)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
records (``list``): List of Records
>>> records = get_all(maxRecords=3, view='All')
"""
all_records = []
for records in self.get_iter(**options):
all_records.extend(records)
return all_records
def match(self, field_name, field_value, **options):
"""
Returns first match found in :any:`get_all`
>>> airtable.match('Name', 'John')
{'fields': {'Name': 'John'} }
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): First record to match the field_value provided
"""
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
for record in self.get_all(**options):
return record
else:
return {}
def search(self, field_name, field_value, record=None, **options):
"""
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
"""
records = []
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
records = self.get_all(**options)
return records
def insert(self, fields, typecast=False):
"""
Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record
"""
return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast})
def _batch_request(self, func, iterable):
""" Internal Function to limit batch calls to API limit """
responses = []
for item in iterable:
responses.append(func(item))
time.sleep(self.API_LIMIT)
return responses
def batch_insert(self, records, typecast=False):
"""
Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
To change the rate limit use ``airtable.API_LIMIT = 0.2``
(5 per second)
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> airtable.batch_insert(records)
Args:
records(``list``): Records to insert
typecast(``boolean``): Automatic data conversion from string values.
Returns:
records (``list``): list of added records
"""
return self._batch_request(self.insert, records)
def update(self, record_id, fields, typecast=False):
"""
Updates a record by its record id.
Only Fields passed are updated, the rest are left as is.
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> fields = {'Status': 'Fired'}
>>> airtable.update(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Updated record
"""
record_url = self.record_url(record_id)
return self._patch(record_url, json_data={"fields": fields, "typecast": typecast})
def update_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Updates the first record to match field name and value.
Only Fields passed are updated, the rest are left as is.
>>> record = {'Name': 'John', 'Tel': '540-255-5522'}
>>> airtable.update_by_field('Name', 'John', record)
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Updated record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.update(record['id'], fields, typecast)
def replace(self, record_id, fields, typecast=False):
"""
Replaces a record by its record id.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
>>> record = airtable.match('Seat Number', '22A')
>>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}
>>> airtable.replace(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): New record
"""
record_url = self.record_url(record_id)
return self._put(record_url, json_data={"fields": fields, "typecast": typecast})
def replace_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Replaces the first record to match field name and value.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): New record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.replace(record['id'], fields, typecast)
def delete(self, record_id):
"""
Deletes a record by its id
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> airtable.delete(record['id'])
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Deleted Record
"""
record_url = self.record_url(record_id)
return self._delete(record_url)
def delete_by_field(self, field_name, field_value, **options):
"""
Deletes first record to match provided ``field_name`` and
``field_value``.
>>> record = airtable.delete_by_field('Employee Id', 'DD13332454')
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Deleted Record
"""
record = self.match(field_name, field_value, **options)
record_url = self.record_url(record['id'])
return self._delete(record_url)
def batch_delete(self, record_ids):
"""
Calls :any:`delete` repetitively, following set API Rate Limit (5/sec)
To change the rate limit set value of ``airtable.API_LIMIT`` to
the time in seconds it should sleep before calling the function again.
>>> record_ids = ['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F']
>>> airtable.batch_delete(records_ids)
Args:
records(``list``): Record Ids to delete
Returns:
records(``list``): list of records deleted
"""
return self._batch_request(self.delete, record_ids)
def mirror(self, records, **options):
"""
Deletes all records on table or view and replaces with records.
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> record = airtable.,mirror(records)
If view options are provided, only records visible on that view will
be deleted.
>>> record = airtable.mirror(records, view='View')
([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])
Args:
records(``list``): Records to insert
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
Returns:
records (``tuple``): (new_records, deleted_records)
"""
all_record_ids = [r['id'] for r in self.get_all(**options)]
deleted_records = self.batch_delete(all_record_ids)
new_records = self.batch_insert(records)
return (new_records, deleted_records)
def __repr__(self):
return '<Airtable table:{}>'.format(self.table_name)
|
gtalarico/airtable-python-wrapper
|
airtable/airtable.py
|
Airtable.replace_by_field
|
python
|
def replace_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Replaces the first record to match field name and value.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): New record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.replace(record['id'], fields, typecast)
|
Replaces the first record to match field name and value.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): New record
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L470-L494
|
[
"def match(self, field_name, field_value, **options):\n \"\"\"\n Returns first match found in :any:`get_all`\n\n >>> airtable.match('Name', 'John')\n {'fields': {'Name': 'John'} }\n\n Args:\n field_name (``str``): Name of field to match (column name).\n field_value (``str``): Value of field to match.\n\n Keyword Args:\n max_records (``int``, optional): The maximum total number of\n records that will be returned. See :any:`MaxRecordsParam`\n view (``str``, optional): The name or ID of a view.\n See :any:`ViewParam`.\n fields (``str``, ``list``, optional): Name of field or fields to\n be retrieved. Default is all fields. See :any:`FieldsParam`.\n sort (``list``, optional): List of fields to sort by.\n Default order is ascending. See :any:`SortParam`.\n\n Returns:\n record (``dict``): First record to match the field_value provided\n \"\"\"\n from_name_and_value = AirtableParams.FormulaParam.from_name_and_value\n formula = from_name_and_value(field_name, field_value)\n options['formula'] = formula\n for record in self.get_all(**options):\n return record\n",
"def replace(self, record_id, fields, typecast=False):\n \"\"\"\n Replaces a record by its record id.\n All Fields are updated to match the new ``fields`` provided.\n If a field is not included in ``fields``, value will bet set to null.\n To update only selected fields, use :any:`update`.\n\n >>> record = airtable.match('Seat Number', '22A')\n >>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}\n >>> airtable.replace(record['id'], fields)\n\n Args:\n record_id(``str``): Id of Record to update\n fields(``dict``): Fields to replace with.\n Must be dictionary with Column names as Key.\n typecast(``boolean``): Automatic data conversion from string values.\n\n Returns:\n record (``dict``): New record\n \"\"\"\n record_url = self.record_url(record_id)\n return self._put(record_url, json_data={\"fields\": fields, \"typecast\": typecast})\n"
] |
class Airtable():
VERSION = 'v0'
API_BASE_URL = 'https://api.airtable.com/'
API_LIMIT = 1.0 / 5 # 5 per second
API_URL = posixpath.join(API_BASE_URL, VERSION)
def __init__(self, base_key, table_name, api_key=None):
"""
If api_key is not provided, :any:`AirtableAuth` will attempt
to use ``os.environ['AIRTABLE_API_KEY']``
"""
session = requests.Session()
session.auth = AirtableAuth(api_key=api_key)
self.session = session
self.table_name = table_name
url_safe_table_name = quote(table_name, safe='')
self.url_table = posixpath.join(self.API_URL, base_key,
url_safe_table_name)
self.is_authenticated = self.validate_session(self.url_table)
def validate_session(self, url):
response = self.session.get(url, params={'maxRecords': 1})
if response.ok:
return True
elif response.status_code == 404:
raise ValueError('Invalid base or table name: {}'.format(url))
else:
raise ValueError(
'Authentication failed: {}'.format(response.reason))
def _process_params(self, params):
"""
Process params names or values as needed using filters
"""
new_params = OrderedDict()
for param_name, param_value in sorted(params.items()):
param_value = params[param_name]
ParamClass = AirtableParams._get(param_name)
new_params.update(ParamClass(param_value).to_param_dict())
return new_params
def _process_response(self, response):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
err_msg = str(exc)
# Reports Decoded 422 Url for better troubleshooting
# Disabled in IronPython Bug:
# https://github.com/IronLanguages/ironpython2/issues/242
if not IS_IPY and response.status_code == 422:
err_msg = err_msg.replace(response.url, unquote(response.url))
err_msg += (' (Decoded URL)')
# Attempt to get Error message from response, Issue #16
try:
error_dict = response.json()
except json.decoder.JSONDecodeError:
pass
else:
if 'error' in error_dict:
err_msg += ' [Error: {}]'.format(error_dict['error'])
raise requests.exceptions.HTTPError(err_msg)
else:
return response.json()
def record_url(self, record_id):
""" Builds URL with record id """
return posixpath.join(self.url_table, record_id)
def _request(self, method, url, params=None, json_data=None):
response = self.session.request(method, url, params=params,
json=json_data)
return self._process_response(response)
def _get(self, url, **params):
processed_params = self._process_params(params)
return self._request('get', url, params=processed_params)
def _post(self, url, json_data):
return self._request('post', url, json_data=json_data)
def _put(self, url, json_data):
return self._request('put', url, json_data=json_data)
def _patch(self, url, json_data):
return self._request('patch', url, json_data=json_data)
def _delete(self, url):
return self._request('delete', url)
def get(self, record_id):
"""
Retrieves a record by its id
>>> record = airtable.get('recwPQIfs4wKPyc9D')
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Record
"""
record_url = self.record_url(record_id)
return self._get(record_url)
def get_iter(self, **options):
"""
Record Retriever Iterator
Returns iterator with lists in batches according to pageSize.
To get all records at once use :any:`get_all`
>>> for page in airtable.get_iter():
... for record in page:
... print(record)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
page_size (``int``, optional ): The number of records returned
in each request. Must be less than or equal to 100.
Default is 100. See :any:`PageSizeParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
iterator (``list``): List of Records, grouped by pageSize
"""
offset = None
while True:
data = self._get(self.url_table, offset=offset, **options)
records = data.get('records', [])
time.sleep(self.API_LIMIT)
yield records
offset = data.get('offset')
if not offset:
break
def get_all(self, **options):
"""
Retrieves all records repetitively and returns a single list.
>>> airtable.get_all()
>>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
>>> airtable.get_all(maxRecords=50)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
records (``list``): List of Records
>>> records = get_all(maxRecords=3, view='All')
"""
all_records = []
for records in self.get_iter(**options):
all_records.extend(records)
return all_records
def match(self, field_name, field_value, **options):
"""
Returns first match found in :any:`get_all`
>>> airtable.match('Name', 'John')
{'fields': {'Name': 'John'} }
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): First record to match the field_value provided
"""
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
for record in self.get_all(**options):
return record
else:
return {}
def search(self, field_name, field_value, record=None, **options):
"""
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
"""
records = []
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
records = self.get_all(**options)
return records
def insert(self, fields, typecast=False):
"""
Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record
"""
return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast})
def _batch_request(self, func, iterable):
""" Internal Function to limit batch calls to API limit """
responses = []
for item in iterable:
responses.append(func(item))
time.sleep(self.API_LIMIT)
return responses
def batch_insert(self, records, typecast=False):
"""
Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
To change the rate limit use ``airtable.API_LIMIT = 0.2``
(5 per second)
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> airtable.batch_insert(records)
Args:
records(``list``): Records to insert
typecast(``boolean``): Automatic data conversion from string values.
Returns:
records (``list``): list of added records
"""
return self._batch_request(self.insert, records)
def update(self, record_id, fields, typecast=False):
"""
Updates a record by its record id.
Only Fields passed are updated, the rest are left as is.
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> fields = {'Status': 'Fired'}
>>> airtable.update(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Updated record
"""
record_url = self.record_url(record_id)
return self._patch(record_url, json_data={"fields": fields, "typecast": typecast})
def update_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Updates the first record to match field name and value.
Only Fields passed are updated, the rest are left as is.
>>> record = {'Name': 'John', 'Tel': '540-255-5522'}
>>> airtable.update_by_field('Name', 'John', record)
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Updated record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.update(record['id'], fields, typecast)
def replace(self, record_id, fields, typecast=False):
"""
Replaces a record by its record id.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
>>> record = airtable.match('Seat Number', '22A')
>>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}
>>> airtable.replace(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): New record
"""
record_url = self.record_url(record_id)
return self._put(record_url, json_data={"fields": fields, "typecast": typecast})
def replace_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Replaces the first record to match field name and value.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will bet set to null.
To update only selected fields, use :any:`update`.
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): New record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.replace(record['id'], fields, typecast)
def delete(self, record_id):
"""
Deletes a record by its id
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> airtable.delete(record['id'])
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Deleted Record
"""
record_url = self.record_url(record_id)
return self._delete(record_url)
def delete_by_field(self, field_name, field_value, **options):
"""
Deletes first record to match provided ``field_name`` and
``field_value``.
>>> record = airtable.delete_by_field('Employee Id', 'DD13332454')
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Deleted Record
"""
record = self.match(field_name, field_value, **options)
record_url = self.record_url(record['id'])
return self._delete(record_url)
def batch_delete(self, record_ids):
"""
Calls :any:`delete` repetitively, following set API Rate Limit (5/sec)
To change the rate limit set value of ``airtable.API_LIMIT`` to
the time in seconds it should sleep before calling the function again.
>>> record_ids = ['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F']
>>> airtable.batch_delete(records_ids)
Args:
records(``list``): Record Ids to delete
Returns:
records(``list``): list of records deleted
"""
return self._batch_request(self.delete, record_ids)
def mirror(self, records, **options):
"""
Deletes all records on table or view and replaces with records.
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> record = airtable.,mirror(records)
If view options are provided, only records visible on that view will
be deleted.
>>> record = airtable.mirror(records, view='View')
([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])
Args:
records(``list``): Records to insert
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
Returns:
records (``tuple``): (new_records, deleted_records)
"""
all_record_ids = [r['id'] for r in self.get_all(**options)]
deleted_records = self.batch_delete(all_record_ids)
new_records = self.batch_insert(records)
return (new_records, deleted_records)
def __repr__(self):
return '<Airtable table:{}>'.format(self.table_name)
|
gtalarico/airtable-python-wrapper
|
airtable/airtable.py
|
Airtable.delete
|
python
|
def delete(self, record_id):
"""
Deletes a record by its id
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> airtable.delete(record['id'])
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Deleted Record
"""
record_url = self.record_url(record_id)
return self._delete(record_url)
|
Deletes a record by its id
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> airtable.delete(record['id'])
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Deleted Record
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L496-L510
|
[
"def record_url(self, record_id):\n \"\"\" Builds URL with record id \"\"\"\n return posixpath.join(self.url_table, record_id)\n",
"def _delete(self, url):\n return self._request('delete', url)\n"
] |
class Airtable():
VERSION = 'v0'
API_BASE_URL = 'https://api.airtable.com/'
API_LIMIT = 1.0 / 5 # 5 per second
API_URL = posixpath.join(API_BASE_URL, VERSION)
def __init__(self, base_key, table_name, api_key=None):
"""
If api_key is not provided, :any:`AirtableAuth` will attempt
to use ``os.environ['AIRTABLE_API_KEY']``
"""
session = requests.Session()
session.auth = AirtableAuth(api_key=api_key)
self.session = session
self.table_name = table_name
url_safe_table_name = quote(table_name, safe='')
self.url_table = posixpath.join(self.API_URL, base_key,
url_safe_table_name)
self.is_authenticated = self.validate_session(self.url_table)
def validate_session(self, url):
response = self.session.get(url, params={'maxRecords': 1})
if response.ok:
return True
elif response.status_code == 404:
raise ValueError('Invalid base or table name: {}'.format(url))
else:
raise ValueError(
'Authentication failed: {}'.format(response.reason))
def _process_params(self, params):
"""
Process params names or values as needed using filters
"""
new_params = OrderedDict()
for param_name, param_value in sorted(params.items()):
param_value = params[param_name]
ParamClass = AirtableParams._get(param_name)
new_params.update(ParamClass(param_value).to_param_dict())
return new_params
def _process_response(self, response):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
err_msg = str(exc)
# Reports Decoded 422 Url for better troubleshooting
# Disabled in IronPython Bug:
# https://github.com/IronLanguages/ironpython2/issues/242
if not IS_IPY and response.status_code == 422:
err_msg = err_msg.replace(response.url, unquote(response.url))
err_msg += (' (Decoded URL)')
# Attempt to get Error message from response, Issue #16
try:
error_dict = response.json()
except json.decoder.JSONDecodeError:
pass
else:
if 'error' in error_dict:
err_msg += ' [Error: {}]'.format(error_dict['error'])
raise requests.exceptions.HTTPError(err_msg)
else:
return response.json()
def record_url(self, record_id):
""" Builds URL with record id """
return posixpath.join(self.url_table, record_id)
def _request(self, method, url, params=None, json_data=None):
response = self.session.request(method, url, params=params,
json=json_data)
return self._process_response(response)
def _get(self, url, **params):
processed_params = self._process_params(params)
return self._request('get', url, params=processed_params)
def _post(self, url, json_data):
return self._request('post', url, json_data=json_data)
def _put(self, url, json_data):
return self._request('put', url, json_data=json_data)
def _patch(self, url, json_data):
return self._request('patch', url, json_data=json_data)
def _delete(self, url):
return self._request('delete', url)
def get(self, record_id):
"""
Retrieves a record by its id
>>> record = airtable.get('recwPQIfs4wKPyc9D')
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Record
"""
record_url = self.record_url(record_id)
return self._get(record_url)
def get_iter(self, **options):
"""
Record Retriever Iterator
Returns iterator with lists in batches according to pageSize.
To get all records at once use :any:`get_all`
>>> for page in airtable.get_iter():
... for record in page:
... print(record)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
page_size (``int``, optional ): The number of records returned
in each request. Must be less than or equal to 100.
Default is 100. See :any:`PageSizeParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
iterator (``list``): List of Records, grouped by pageSize
"""
offset = None
while True:
data = self._get(self.url_table, offset=offset, **options)
records = data.get('records', [])
time.sleep(self.API_LIMIT)
yield records
offset = data.get('offset')
if not offset:
break
def get_all(self, **options):
"""
Retrieves all records repetitively and returns a single list.
>>> airtable.get_all()
>>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
>>> airtable.get_all(maxRecords=50)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
records (``list``): List of Records
>>> records = get_all(maxRecords=3, view='All')
"""
all_records = []
for records in self.get_iter(**options):
all_records.extend(records)
return all_records
def match(self, field_name, field_value, **options):
"""
Returns first match found in :any:`get_all`
>>> airtable.match('Name', 'John')
{'fields': {'Name': 'John'} }
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): First record to match the field_value provided
"""
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
for record in self.get_all(**options):
return record
else:
return {}
def search(self, field_name, field_value, record=None, **options):
"""
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
"""
records = []
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
records = self.get_all(**options)
return records
def insert(self, fields, typecast=False):
"""
Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record
"""
return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast})
def _batch_request(self, func, iterable):
""" Internal Function to limit batch calls to API limit """
responses = []
for item in iterable:
responses.append(func(item))
time.sleep(self.API_LIMIT)
return responses
def batch_insert(self, records, typecast=False):
"""
Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
To change the rate limit use ``airtable.API_LIMIT = 0.2``
(5 per second)
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> airtable.batch_insert(records)
Args:
records(``list``): Records to insert
typecast(``boolean``): Automatic data conversion from string values.
Returns:
records (``list``): list of added records
"""
return self._batch_request(self.insert, records)
def update(self, record_id, fields, typecast=False):
"""
Updates a record by its record id.
Only Fields passed are updated, the rest are left as is.
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> fields = {'Status': 'Fired'}
>>> airtable.update(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Updated record
"""
record_url = self.record_url(record_id)
return self._patch(record_url, json_data={"fields": fields, "typecast": typecast})
def update_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Updates the first record to match field name and value.
Only Fields passed are updated, the rest are left as is.
>>> record = {'Name': 'John', 'Tel': '540-255-5522'}
>>> airtable.update_by_field('Name', 'John', record)
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Updated record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.update(record['id'], fields, typecast)
def replace(self, record_id, fields, typecast=False):
"""
Replaces a record by its record id.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will be set to null.
To update only selected fields, use :any:`update`.
>>> record = airtable.match('Seat Number', '22A')
>>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}
>>> airtable.replace(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): New record
"""
record_url = self.record_url(record_id)
return self._put(record_url, json_data={"fields": fields, "typecast": typecast})
def replace_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Replaces the first record to match field name and value.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will be set to null.
To update only selected fields, use :any:`update`.
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): New record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.replace(record['id'], fields, typecast)
def delete(self, record_id):
"""
Deletes a record by its id
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> airtable.delete(record['id'])
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Deleted Record
"""
record_url = self.record_url(record_id)
return self._delete(record_url)
def delete_by_field(self, field_name, field_value, **options):
"""
Deletes first record to match provided ``field_name`` and
``field_value``.
>>> record = airtable.delete_by_field('Employee Id', 'DD13332454')
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Deleted Record
"""
record = self.match(field_name, field_value, **options)
record_url = self.record_url(record['id'])
return self._delete(record_url)
def batch_delete(self, record_ids):
"""
Calls :any:`delete` repetitively, following set API Rate Limit (5/sec)
To change the rate limit set value of ``airtable.API_LIMIT`` to
the time in seconds it should sleep before calling the function again.
>>> record_ids = ['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F']
>>> airtable.batch_delete(record_ids)
Args:
records(``list``): Record Ids to delete
Returns:
records(``list``): list of records deleted
"""
return self._batch_request(self.delete, record_ids)
def mirror(self, records, **options):
"""
Deletes all records on table or view and replaces with records.
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> record = airtable.mirror(records)
If view options are provided, only records visible on that view will
be deleted.
>>> record = airtable.mirror(records, view='View')
([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])
Args:
records(``list``): Records to insert
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
Returns:
records (``tuple``): (new_records, deleted_records)
"""
all_record_ids = [r['id'] for r in self.get_all(**options)]
deleted_records = self.batch_delete(all_record_ids)
new_records = self.batch_insert(records)
return (new_records, deleted_records)
def __repr__(self):
return '<Airtable table:{}>'.format(self.table_name)
|
gtalarico/airtable-python-wrapper
|
airtable/airtable.py
|
Airtable.delete_by_field
|
python
|
def delete_by_field(self, field_name, field_value, **options):
"""
Deletes first record to match provided ``field_name`` and
``field_value``.
>>> record = airtable.delete_by_field('Employee Id', 'DD13332454')
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Deleted Record
"""
record = self.match(field_name, field_value, **options)
record_url = self.record_url(record['id'])
return self._delete(record_url)
|
Deletes first record to match provided ``field_name`` and
``field_value``.
>>> record = airtable.delete_by_field('Employee Id', 'DD13332454')
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Deleted Record
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L512-L534
|
[
"def record_url(self, record_id):\n \"\"\" Builds URL with record id \"\"\"\n return posixpath.join(self.url_table, record_id)\n",
"def _delete(self, url):\n return self._request('delete', url)\n",
"def match(self, field_name, field_value, **options):\n \"\"\"\n Returns first match found in :any:`get_all`\n\n >>> airtable.match('Name', 'John')\n {'fields': {'Name': 'John'} }\n\n Args:\n field_name (``str``): Name of field to match (column name).\n field_value (``str``): Value of field to match.\n\n Keyword Args:\n max_records (``int``, optional): The maximum total number of\n records that will be returned. See :any:`MaxRecordsParam`\n view (``str``, optional): The name or ID of a view.\n See :any:`ViewParam`.\n fields (``str``, ``list``, optional): Name of field or fields to\n be retrieved. Default is all fields. See :any:`FieldsParam`.\n sort (``list``, optional): List of fields to sort by.\n Default order is ascending. See :any:`SortParam`.\n\n Returns:\n record (``dict``): First record to match the field_value provided\n \"\"\"\n from_name_and_value = AirtableParams.FormulaParam.from_name_and_value\n formula = from_name_and_value(field_name, field_value)\n options['formula'] = formula\n for record in self.get_all(**options):\n return record\n"
] |
class Airtable():
VERSION = 'v0'
API_BASE_URL = 'https://api.airtable.com/'
API_LIMIT = 1.0 / 5 # 5 per second
API_URL = posixpath.join(API_BASE_URL, VERSION)
def __init__(self, base_key, table_name, api_key=None):
"""
If api_key is not provided, :any:`AirtableAuth` will attempt
to use ``os.environ['AIRTABLE_API_KEY']``
"""
session = requests.Session()
session.auth = AirtableAuth(api_key=api_key)
self.session = session
self.table_name = table_name
url_safe_table_name = quote(table_name, safe='')
self.url_table = posixpath.join(self.API_URL, base_key,
url_safe_table_name)
self.is_authenticated = self.validate_session(self.url_table)
def validate_session(self, url):
response = self.session.get(url, params={'maxRecords': 1})
if response.ok:
return True
elif response.status_code == 404:
raise ValueError('Invalid base or table name: {}'.format(url))
else:
raise ValueError(
'Authentication failed: {}'.format(response.reason))
def _process_params(self, params):
"""
Process params names or values as needed using filters
"""
new_params = OrderedDict()
for param_name, param_value in sorted(params.items()):
param_value = params[param_name]
ParamClass = AirtableParams._get(param_name)
new_params.update(ParamClass(param_value).to_param_dict())
return new_params
def _process_response(self, response):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
err_msg = str(exc)
# Reports Decoded 422 Url for better troubleshooting
# Disabled in IronPython Bug:
# https://github.com/IronLanguages/ironpython2/issues/242
if not IS_IPY and response.status_code == 422:
err_msg = err_msg.replace(response.url, unquote(response.url))
err_msg += (' (Decoded URL)')
# Attempt to get Error message from response, Issue #16
try:
error_dict = response.json()
except json.decoder.JSONDecodeError:
pass
else:
if 'error' in error_dict:
err_msg += ' [Error: {}]'.format(error_dict['error'])
raise requests.exceptions.HTTPError(err_msg)
else:
return response.json()
def record_url(self, record_id):
""" Builds URL with record id """
return posixpath.join(self.url_table, record_id)
def _request(self, method, url, params=None, json_data=None):
response = self.session.request(method, url, params=params,
json=json_data)
return self._process_response(response)
def _get(self, url, **params):
processed_params = self._process_params(params)
return self._request('get', url, params=processed_params)
def _post(self, url, json_data):
return self._request('post', url, json_data=json_data)
def _put(self, url, json_data):
return self._request('put', url, json_data=json_data)
def _patch(self, url, json_data):
return self._request('patch', url, json_data=json_data)
def _delete(self, url):
return self._request('delete', url)
def get(self, record_id):
"""
Retrieves a record by its id
>>> record = airtable.get('recwPQIfs4wKPyc9D')
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Record
"""
record_url = self.record_url(record_id)
return self._get(record_url)
def get_iter(self, **options):
"""
Record Retriever Iterator
Returns iterator with lists in batches according to pageSize.
To get all records at once use :any:`get_all`
>>> for page in airtable.get_iter():
... for record in page:
... print(record)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
page_size (``int``, optional ): The number of records returned
in each request. Must be less than or equal to 100.
Default is 100. See :any:`PageSizeParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
iterator (``list``): List of Records, grouped by pageSize
"""
offset = None
while True:
data = self._get(self.url_table, offset=offset, **options)
records = data.get('records', [])
time.sleep(self.API_LIMIT)
yield records
offset = data.get('offset')
if not offset:
break
def get_all(self, **options):
"""
Retrieves all records repetitively and returns a single list.
>>> airtable.get_all()
>>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
>>> airtable.get_all(maxRecords=50)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
records (``list``): List of Records
>>> records = get_all(maxRecords=3, view='All')
"""
all_records = []
for records in self.get_iter(**options):
all_records.extend(records)
return all_records
def match(self, field_name, field_value, **options):
"""
Returns first match found in :any:`get_all`
>>> airtable.match('Name', 'John')
{'fields': {'Name': 'John'} }
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): First record to match the field_value provided
"""
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
for record in self.get_all(**options):
return record
else:
return {}
def search(self, field_name, field_value, record=None, **options):
"""
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
"""
records = []
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
records = self.get_all(**options)
return records
def insert(self, fields, typecast=False):
"""
Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record
"""
return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast})
def _batch_request(self, func, iterable):
""" Internal Function to limit batch calls to API limit """
responses = []
for item in iterable:
responses.append(func(item))
time.sleep(self.API_LIMIT)
return responses
def batch_insert(self, records, typecast=False):
"""
Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
To change the rate limit use ``airtable.API_LIMIT = 0.2``
(5 per second)
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> airtable.batch_insert(records)
Args:
records(``list``): Records to insert
typecast(``boolean``): Automatic data conversion from string values.
Returns:
records (``list``): list of added records
"""
return self._batch_request(self.insert, records)
def update(self, record_id, fields, typecast=False):
"""
Updates a record by its record id.
Only Fields passed are updated, the rest are left as is.
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> fields = {'Status': 'Fired'}
>>> airtable.update(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Updated record
"""
record_url = self.record_url(record_id)
return self._patch(record_url, json_data={"fields": fields, "typecast": typecast})
def update_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Updates the first record to match field name and value.
Only Fields passed are updated, the rest are left as is.
>>> record = {'Name': 'John', 'Tel': '540-255-5522'}
>>> airtable.update_by_field('Name', 'John', record)
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to update.
Must be dictionary with Column names as Key
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Updated record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.update(record['id'], fields, typecast)
def replace(self, record_id, fields, typecast=False):
"""
Replaces a record by its record id.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will be set to null.
To update only selected fields, use :any:`update`.
>>> record = airtable.match('Seat Number', '22A')
>>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}
>>> airtable.replace(record['id'], fields)
Args:
record_id(``str``): Id of Record to update
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): New record
"""
record_url = self.record_url(record_id)
return self._put(record_url, json_data={"fields": fields, "typecast": typecast})
def replace_by_field(self, field_name, field_value, fields, typecast=False, **options):
"""
Replaces the first record to match field name and value.
All Fields are updated to match the new ``fields`` provided.
If a field is not included in ``fields``, value will be set to null.
To update only selected fields, use :any:`update`.
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
fields(``dict``): Fields to replace with.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): New record
"""
record = self.match(field_name, field_value, **options)
return {} if not record else self.replace(record['id'], fields, typecast)
def delete(self, record_id):
"""
Deletes a record by its id
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> airtable.delete(record['id'])
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Deleted Record
"""
record_url = self.record_url(record_id)
return self._delete(record_url)
def delete_by_field(self, field_name, field_value, **options):
"""
Deletes first record to match provided ``field_name`` and
``field_value``.
>>> record = airtable.delete_by_field('Employee Id', 'DD13332454')
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
record (``dict``): Deleted Record
"""
record = self.match(field_name, field_value, **options)
record_url = self.record_url(record['id'])
return self._delete(record_url)
def batch_delete(self, record_ids):
"""
Calls :any:`delete` repetitively, following set API Rate Limit (5/sec)
To change the rate limit set value of ``airtable.API_LIMIT`` to
the time in seconds it should sleep before calling the function again.
>>> record_ids = ['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F']
>>> airtable.batch_delete(record_ids)
Args:
records(``list``): Record Ids to delete
Returns:
records(``list``): list of records deleted
"""
return self._batch_request(self.delete, record_ids)
def mirror(self, records, **options):
"""
Deletes all records on table or view and replaces with records.
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> record = airtable.mirror(records)
If view options are provided, only records visible on that view will
be deleted.
>>> record = airtable.mirror(records, view='View')
([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])
Args:
records(``list``): Records to insert
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
Returns:
records (``tuple``): (new_records, deleted_records)
"""
all_record_ids = [r['id'] for r in self.get_all(**options)]
deleted_records = self.batch_delete(all_record_ids)
new_records = self.batch_insert(records)
return (new_records, deleted_records)
def __repr__(self):
    """Unambiguous representation, e.g. ``<Airtable table:MyTable>``."""
    return '<Airtable table:%s>' % (self.table_name,)
|
gtalarico/airtable-python-wrapper
|
airtable/airtable.py
|
Airtable.mirror
|
python
|
def mirror(self, records, **options):
    """
    Replaces the entire contents of the table (or of the given view)
    with ``records``: every currently visible record is deleted, then
    ``records`` are inserted in their place.

    >>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
    >>> record = airtable.mirror(records)

    If view options are provided, only records visible on that view will
    be deleted.

    >>> record = airtable.mirror(records, view='View')
    ([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])

    Args:
        records(``list``): Records to insert
    Keyword Args:
        max_records (``int``, optional): The maximum total number of
            records that will be returned. See :any:`MaxRecordsParam`
        view (``str``, optional): The name or ID of a view.
            See :any:`ViewParam`.
    Returns:
        records (``tuple``): (new_records, deleted_records)
    """
    # Gather ids of the records currently visible under `options`.
    existing_ids = [existing['id'] for existing in self.get_all(**options)]
    # Delete first, then insert the replacements (both rate-limited).
    removed = self.batch_delete(existing_ids)
    added = self.batch_insert(records)
    return (added, removed)
|
Deletes all records on table or view and replaces with records.
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> record = airtable.mirror(records)
If view options are provided, only records visible on that view will
be deleted.
>>> record = airtable.mirror(records, view='View')
([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])
Args:
records(``list``): Records to insert
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
Returns:
records (``tuple``): (new_records, deleted_records)
|
train
|
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L554-L584
|
[
"def get_all(self, **options):\n \"\"\"\n Retrieves all records repetitively and returns a single list.\n\n >>> airtable.get_all()\n >>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])\n >>> airtable.get_all(maxRecords=50)\n [{'fields': ... }, ...]\n\n Keyword Args:\n max_records (``int``, optional): The maximum total number of\n records that will be returned. See :any:`MaxRecordsParam`\n view (``str``, optional): The name or ID of a view.\n See :any:`ViewParam`.\n fields (``str``, ``list``, optional): Name of field or fields to\n be retrieved. Default is all fields. See :any:`FieldsParam`.\n sort (``list``, optional): List of fields to sort by.\n Default order is ascending. See :any:`SortParam`.\n formula (``str``, optional): Airtable formula.\n See :any:`FormulaParam`.\n\n Returns:\n records (``list``): List of Records\n\n >>> records = get_all(maxRecords=3, view='All')\n\n \"\"\"\n all_records = []\n for records in self.get_iter(**options):\n all_records.extend(records)\n return all_records\n",
"def batch_insert(self, records, typecast=False):\n \"\"\"\n Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)\n To change the rate limit use ``airtable.API_LIMIT = 0.2``\n (5 per second)\n\n >>> records = [{'Name': 'John'}, {'Name': 'Marc'}]\n >>> airtable.batch_insert(records)\n\n Args:\n records(``list``): Records to insert\n typecast(``boolean``): Automatic data conversion from string values.\n\n Returns:\n records (``list``): list of added records\n\n \"\"\"\n return self._batch_request(self.insert, records)\n",
"def batch_delete(self, record_ids):\n \"\"\"\n Calls :any:`delete` repetitively, following set API Rate Limit (5/sec)\n To change the rate limit set value of ``airtable.API_LIMIT`` to\n the time in seconds it should sleep before calling the function again.\n\n >>> record_ids = ['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F']\n >>> airtable.batch_delete(records_ids)\n\n Args:\n records(``list``): Record Ids to delete\n\n Returns:\n records(``list``): list of records deleted\n\n \"\"\"\n return self._batch_request(self.delete, record_ids)\n"
] |
class Airtable():
    """Client for a single Airtable base/table pair.

    Wraps the Airtable REST API: paginated record retrieval, formula-based
    match/search, and insert/update/replace/delete operations, including
    rate-limited batch variants.
    """

    VERSION = 'v0'
    API_BASE_URL = 'https://api.airtable.com/'
    API_LIMIT = 1.0 / 5  # 5 per second
    API_URL = posixpath.join(API_BASE_URL, VERSION)

    def __init__(self, base_key, table_name, api_key=None):
        """
        If api_key is not provided, :any:`AirtableAuth` will attempt
        to use ``os.environ['AIRTABLE_API_KEY']``
        """
        session = requests.Session()
        session.auth = AirtableAuth(api_key=api_key)
        self.session = session
        self.table_name = table_name
        # Table names may contain spaces/unicode; percent-encode for the URL.
        url_safe_table_name = quote(table_name, safe='')
        self.url_table = posixpath.join(self.API_URL, base_key,
                                        url_safe_table_name)
        # Fires a one-record request immediately; raises ValueError on bad
        # credentials or an unknown base/table.
        self.is_authenticated = self.validate_session(self.url_table)

    def validate_session(self, url):
        """Probe the table with a minimal request; raise ValueError on failure."""
        response = self.session.get(url, params={'maxRecords': 1})
        if response.ok:
            return True
        elif response.status_code == 404:
            raise ValueError('Invalid base or table name: {}'.format(url))
        else:
            raise ValueError(
                'Authentication failed: {}'.format(response.reason))

    def _process_params(self, params):
        """
        Process params names or values as needed using filters
        """
        new_params = OrderedDict()
        for param_name, param_value in sorted(params.items()):
            param_value = params[param_name]
            # Each keyword is translated by its AirtableParams filter class
            # into the raw query-string form the API expects.
            ParamClass = AirtableParams._get(param_name)
            new_params.update(ParamClass(param_value).to_param_dict())
        return new_params

    def _process_response(self, response):
        """Return the response JSON, re-raising HTTP errors with extra detail."""
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as exc:
            err_msg = str(exc)
            # Reports Decoded 422 Url for better troubleshooting
            # Disabled in IronPython Bug:
            # https://github.com/IronLanguages/ironpython2/issues/242
            if not IS_IPY and response.status_code == 422:
                err_msg = err_msg.replace(response.url, unquote(response.url))
                err_msg += (' (Decoded URL)')
            # Attempt to get Error message from response, Issue #16
            try:
                error_dict = response.json()
            except json.decoder.JSONDecodeError:
                pass
            else:
                if 'error' in error_dict:
                    err_msg += ' [Error: {}]'.format(error_dict['error'])
            raise requests.exceptions.HTTPError(err_msg)
        else:
            return response.json()

    def record_url(self, record_id):
        """ Builds URL with record id """
        return posixpath.join(self.url_table, record_id)

    def _request(self, method, url, params=None, json_data=None):
        # Single choke point for every HTTP verb.
        response = self.session.request(method, url, params=params,
                                        json=json_data)
        return self._process_response(response)

    def _get(self, url, **params):
        processed_params = self._process_params(params)
        return self._request('get', url, params=processed_params)

    def _post(self, url, json_data):
        return self._request('post', url, json_data=json_data)

    def _put(self, url, json_data):
        return self._request('put', url, json_data=json_data)

    def _patch(self, url, json_data):
        return self._request('patch', url, json_data=json_data)

    def _delete(self, url):
        return self._request('delete', url)

    def get(self, record_id):
        """
        Retrieves a record by its id
        >>> record = airtable.get('recwPQIfs4wKPyc9D')
        Args:
            record_id(``str``): Airtable record id
        Returns:
            record (``dict``): Record
        """
        record_url = self.record_url(record_id)
        return self._get(record_url)

    def get_iter(self, **options):
        """
        Record Retriever Iterator
        Returns iterator with lists in batches according to pageSize.
        To get all records at once use :any:`get_all`
        >>> for page in airtable.get_iter():
        ...     for record in page:
        ...         print(record)
        [{'fields': ... }, ...]
        Keyword Args:
            max_records (``int``, optional): The maximum total number of
                records that will be returned. See :any:`MaxRecordsParam`
            view (``str``, optional): The name or ID of a view.
                See :any:`ViewParam`.
            page_size (``int``, optional ): The number of records returned
                in each request. Must be less than or equal to 100.
                Default is 100. See :any:`PageSizeParam`.
            fields (``str``, ``list``, optional): Name of field or fields to
                be retrieved. Default is all fields. See :any:`FieldsParam`.
            sort (``list``, optional): List of fields to sort by.
                Default order is ascending. See :any:`SortParam`.
            formula (``str``, optional): Airtable formula.
                See :any:`FormulaParam`.
        Returns:
            iterator (``list``): List of Records, grouped by pageSize
        """
        # Airtable paginates with an opaque `offset` token; keep requesting
        # pages (sleeping API_LIMIT between them) until no offset is returned.
        offset = None
        while True:
            data = self._get(self.url_table, offset=offset, **options)
            records = data.get('records', [])
            time.sleep(self.API_LIMIT)
            yield records
            offset = data.get('offset')
            if not offset:
                break

    def get_all(self, **options):
        """
        Retrieves all records repetitively and returns a single list.
        >>> airtable.get_all()
        >>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
        >>> airtable.get_all(maxRecords=50)
        [{'fields': ... }, ...]
        Keyword Args:
            max_records (``int``, optional): The maximum total number of
                records that will be returned. See :any:`MaxRecordsParam`
            view (``str``, optional): The name or ID of a view.
                See :any:`ViewParam`.
            fields (``str``, ``list``, optional): Name of field or fields to
                be retrieved. Default is all fields. See :any:`FieldsParam`.
            sort (``list``, optional): List of fields to sort by.
                Default order is ascending. See :any:`SortParam`.
            formula (``str``, optional): Airtable formula.
                See :any:`FormulaParam`.
        Returns:
            records (``list``): List of Records
        >>> records = get_all(maxRecords=3, view='All')
        """
        all_records = []
        for records in self.get_iter(**options):
            all_records.extend(records)
        return all_records

    def match(self, field_name, field_value, **options):
        """
        Returns first match found in :any:`get_all`
        >>> airtable.match('Name', 'John')
        {'fields': {'Name': 'John'} }
        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.
        Keyword Args:
            max_records (``int``, optional): The maximum total number of
                records that will be returned. See :any:`MaxRecordsParam`
            view (``str``, optional): The name or ID of a view.
                See :any:`ViewParam`.
            fields (``str``, ``list``, optional): Name of field or fields to
                be retrieved. Default is all fields. See :any:`FieldsParam`.
            sort (``list``, optional): List of fields to sort by.
                Default order is ascending. See :any:`SortParam`.
        Returns:
            record (``dict``): First record to match the field_value provided
        """
        from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
        formula = from_name_and_value(field_name, field_value)
        options['formula'] = formula
        # for/else: the return exits on the first record; the else clause
        # only runs when get_all() yielded nothing, giving an empty dict.
        for record in self.get_all(**options):
            return record
        else:
            return {}

    def search(self, field_name, field_value, record=None, **options):
        """
        Returns all matching records found in :any:`get_all`
        >>> airtable.search('Gender', 'Male')
        [{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.
        Keyword Args:
            max_records (``int``, optional): The maximum total number of
                records that will be returned. See :any:`MaxRecordsParam`
            view (``str``, optional): The name or ID of a view.
                See :any:`ViewParam`.
            fields (``str``, ``list``, optional): Name of field or fields to
                be retrieved. Default is all fields. See :any:`FieldsParam`.
            sort (``list``, optional): List of fields to sort by.
                Default order is ascending. See :any:`SortParam`.
        Returns:
            records (``list``): All records that matched ``field_value``
        """
        # NOTE(review): the `record` parameter is accepted but never used in
        # this body — presumably kept for interface compatibility; confirm.
        records = []
        from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
        formula = from_name_and_value(field_name, field_value)
        options['formula'] = formula
        records = self.get_all(**options)
        return records

    def insert(self, fields, typecast=False):
        """
        Inserts a record
        >>> record = {'Name': 'John'}
        >>> airtable.insert(record)
        Args:
            fields(``dict``): Fields to insert.
                Must be dictionary with Column names as Key.
            typecast(``boolean``): Automatic data conversion from string values.
        Returns:
            record (``dict``): Inserted record
        """
        return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast})

    def _batch_request(self, func, iterable):
        """ Internal Function to limit batch calls to API limit """
        responses = []
        for item in iterable:
            responses.append(func(item))
            time.sleep(self.API_LIMIT)
        return responses

    def batch_insert(self, records, typecast=False):
        """
        Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
        To change the rate limit use ``airtable.API_LIMIT = 0.2``
        (5 per second)
        >>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
        >>> airtable.batch_insert(records)
        Args:
            records(``list``): Records to insert
            typecast(``boolean``): Automatic data conversion from string values.
        Returns:
            records (``list``): list of added records
        """
        return self._batch_request(self.insert, records)

    def update(self, record_id, fields, typecast=False):
        """
        Updates a record by its record id.
        Only Fields passed are updated, the rest are left as is.
        >>> record = airtable.match('Employee Id', 'DD13332454')
        >>> fields = {'Status': 'Fired'}
        >>> airtable.update(record['id'], fields)
        Args:
            record_id(``str``): Id of Record to update
            fields(``dict``): Fields to update.
                Must be dictionary with Column names as Key
            typecast(``boolean``): Automatic data conversion from string values.
        Returns:
            record (``dict``): Updated record
        """
        record_url = self.record_url(record_id)
        return self._patch(record_url, json_data={"fields": fields, "typecast": typecast})

    def update_by_field(self, field_name, field_value, fields, typecast=False, **options):
        """
        Updates the first record to match field name and value.
        Only Fields passed are updated, the rest are left as is.
        >>> record = {'Name': 'John', 'Tel': '540-255-5522'}
        >>> airtable.update_by_field('Name', 'John', record)
        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.
            fields(``dict``): Fields to update.
                Must be dictionary with Column names as Key
            typecast(``boolean``): Automatic data conversion from string values.
        Keyword Args:
            view (``str``, optional): The name or ID of a view.
                See :any:`ViewParam`.
            sort (``list``, optional): List of fields to sort by.
                Default order is ascending. See :any:`SortParam`.
        Returns:
            record (``dict``): Updated record
        """
        record = self.match(field_name, field_value, **options)
        # No match yields {}; skip the PATCH entirely in that case.
        return {} if not record else self.update(record['id'], fields, typecast)

    def replace(self, record_id, fields, typecast=False):
        """
        Replaces a record by its record id.
        All Fields are updated to match the new ``fields`` provided.
        If a field is not included in ``fields``, value will be set to null.
        To update only selected fields, use :any:`update`.
        >>> record = airtable.match('Seat Number', '22A')
        >>> fields = {'PassengerName': 'Mike', 'Passport': 'YASD232-23'}
        >>> airtable.replace(record['id'], fields)
        Args:
            record_id(``str``): Id of Record to update
            fields(``dict``): Fields to replace with.
                Must be dictionary with Column names as Key.
            typecast(``boolean``): Automatic data conversion from string values.
        Returns:
            record (``dict``): New record
        """
        record_url = self.record_url(record_id)
        return self._put(record_url, json_data={"fields": fields, "typecast": typecast})

    def replace_by_field(self, field_name, field_value, fields, typecast=False, **options):
        """
        Replaces the first record to match field name and value.
        All Fields are updated to match the new ``fields`` provided.
        If a field is not included in ``fields``, value will be set to null.
        To update only selected fields, use :any:`update`.
        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.
            fields(``dict``): Fields to replace with.
                Must be dictionary with Column names as Key.
            typecast(``boolean``): Automatic data conversion from string values.
        Keyword Args:
            view (``str``, optional): The name or ID of a view.
                See :any:`ViewParam`.
            sort (``list``, optional): List of fields to sort by.
                Default order is ascending. See :any:`SortParam`.
        Returns:
            record (``dict``): New record
        """
        record = self.match(field_name, field_value, **options)
        return {} if not record else self.replace(record['id'], fields, typecast)

    def delete(self, record_id):
        """
        Deletes a record by its id
        >>> record = airtable.match('Employee Id', 'DD13332454')
        >>> airtable.delete(record['id'])
        Args:
            record_id(``str``): Airtable record id
        Returns:
            record (``dict``): Deleted Record
        """
        record_url = self.record_url(record_id)
        return self._delete(record_url)

    def delete_by_field(self, field_name, field_value, **options):
        """
        Deletes first record to match provided ``field_name`` and
        ``field_value``.
        >>> record = airtable.delete_by_field('Employee Id', 'DD13332454')
        Args:
            field_name (``str``): Name of field to match (column name).
            field_value (``str``): Value of field to match.
        Keyword Args:
            view (``str``, optional): The name or ID of a view.
                See :any:`ViewParam`.
            sort (``list``, optional): List of fields to sort by.
                Default order is ascending. See :any:`SortParam`.
        Returns:
            record (``dict``): Deleted Record
        """
        record = self.match(field_name, field_value, **options)
        # NOTE(review): match() returns {} when nothing matches, so
        # record['id'] would raise KeyError here — confirm intended behavior.
        record_url = self.record_url(record['id'])
        return self._delete(record_url)

    def batch_delete(self, record_ids):
        """
        Calls :any:`delete` repetitively, following set API Rate Limit (5/sec)
        To change the rate limit set value of ``airtable.API_LIMIT`` to
        the time in seconds it should sleep before calling the function again.
        >>> record_ids = ['recwPQIfs4wKPyc9D', 'recwDxIfs3wDPyc3F']
        >>> airtable.batch_delete(record_ids)
        Args:
            record_ids(``list``): Record Ids to delete
        Returns:
            records(``list``): list of records deleted
        """
        return self._batch_request(self.delete, record_ids)

    def mirror(self, records, **options):
        """
        Deletes all records on table or view and replaces with records.
        >>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
        >>> record = airtable.mirror(records)
        If view options are provided, only records visible on that view will
        be deleted.
        >>> record = airtable.mirror(records, view='View')
        ([{'id': 'recwPQIfs4wKPyc9D', ... }], [{'deleted': True, ... }])
        Args:
            records(``list``): Records to insert
        Keyword Args:
            max_records (``int``, optional): The maximum total number of
                records that will be returned. See :any:`MaxRecordsParam`
            view (``str``, optional): The name or ID of a view.
                See :any:`ViewParam`.
        Returns:
            records (``tuple``): (new_records, deleted_records)
        """
        all_record_ids = [r['id'] for r in self.get_all(**options)]
        deleted_records = self.batch_delete(all_record_ids)
        new_records = self.batch_insert(records)
        return (new_records, deleted_records)

    def __repr__(self):
        return '<Airtable table:{}>'.format(self.table_name)
|
adamrehn/slidingwindow
|
slidingwindow/SlidingWindow.py
|
generate
|
python
|
def generate(data, dimOrder, maxWindowSize, overlapPercent, transforms = []):
    """Generates a set of sliding windows for the specified dataset."""
    # Locate the width and height axes via the dimension-order list, then
    # read the spatial extents straight off the data's shape tuple.
    wAxis = dimOrder.index('w')
    hAxis = dimOrder.index('h')
    return generateForSize(data.shape[wAxis], data.shape[hAxis], dimOrder,
                           maxWindowSize, overlapPercent, transforms)
|
Generates a set of sliding windows for the specified dataset.
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/SlidingWindow.py#L87-L97
|
[
"def generateForSize(width, height, dimOrder, maxWindowSize, overlapPercent, transforms = []):\n\t\"\"\"\n\tGenerates a set of sliding windows for a dataset with the specified dimensions and order.\n\t\"\"\"\n\n\t# If the input data is smaller than the specified window size,\n\t# clip the window size to the input size on both dimensions\n\twindowSizeX = min(maxWindowSize, width)\n\twindowSizeY = min(maxWindowSize, height)\n\n\t# Compute the window overlap and step size\n\twindowOverlapX = int(math.floor(windowSizeX * overlapPercent))\n\twindowOverlapY = int(math.floor(windowSizeY * overlapPercent))\n\tstepSizeX = windowSizeX - windowOverlapX\n\tstepSizeY = windowSizeY - windowOverlapY\n\n\t# Determine how many windows we will need in order to cover the input data\n\tlastX = width - windowSizeX\n\tlastY = height - windowSizeY\n\txOffsets = list(range(0, lastX+1, stepSizeX))\n\tyOffsets = list(range(0, lastY+1, stepSizeY))\n\n\t# Unless the input data dimensions are exact multiples of the step size,\n\t# we will need one additional row and column of windows to get 100% coverage\n\tif len(xOffsets) == 0 or xOffsets[-1] != lastX:\n\t\txOffsets.append(lastX)\n\tif len(yOffsets) == 0 or yOffsets[-1] != lastY:\n\t\tyOffsets.append(lastY)\n\n\t# Generate the list of windows\n\twindows = []\n\tfor xOffset in xOffsets:\n\t\tfor yOffset in yOffsets:\n\t\t\tfor transform in [None] + transforms:\n\t\t\t\twindows.append(SlidingWindow(\n\t\t\t\t\tx=xOffset,\n\t\t\t\t\ty=yOffset,\n\t\t\t\t\tw=windowSizeX,\n\t\t\t\t\th=windowSizeY,\n\t\t\t\t\tdimOrder=dimOrder,\n\t\t\t\t\ttransform=transform\n\t\t\t\t))\n\n\treturn windows\n"
] |
import math
class DimOrder(object):
    """
    Represents the order of the dimensions in a dataset's shape.
    """
    # 'c' = channel, 'h' = height, 'w' = width.
    # Channels-first layout.
    ChannelHeightWidth = ['c', 'h', 'w']
    # Channels-last layout.
    HeightWidthChannel = ['h', 'w', 'c']
class SlidingWindow(object):
    """
    Represents a single window into a larger dataset.

    A window is an (x, y, w, h) rectangle plus the dimension order of the
    target dataset and an optional transform applied when slicing.
    """

    def __init__(self, x, y, w, h, dimOrder, transform = None):
        """
        Creates a new window with the specified dimensions and transform
        """
        self.x = x                    # left edge (column offset)
        self.y = y                    # top edge (row offset)
        self.w = w                    # window width
        self.h = h                    # window height
        self.dimOrder = dimOrder      # one of the DimOrder layouts
        self.transform = transform    # optional callable used by apply()

    def apply(self, matrix):
        """
        Slices the supplied matrix and applies any transform bound to this window
        """
        view = matrix[ self.indices() ]
        # Idiom fix: compare against None by identity, not equality.
        return self.transform(view) if self.transform is not None else view

    def getRect(self):
        """
        Returns the window bounds as a tuple of (x,y,w,h)
        """
        return (self.x, self.y, self.w, self.h)

    def setRect(self, rect):
        """
        Sets the window bounds from a tuple of (x,y,w,h)
        """
        self.x, self.y, self.w, self.h = rect

    def indices(self, includeChannel=True):
        """
        Retrieves the indices for this window as a tuple of slices

        Raises:
            ValueError: if ``self.dimOrder`` is not a recognised layout.
        """
        if self.dimOrder == DimOrder.HeightWidthChannel:
            # Equivalent to [self.y:self.y+self.h, self.x:self.x+self.w]
            return (
                slice(self.y, self.y+self.h),
                slice(self.x, self.x+self.w)
            )
        elif self.dimOrder == DimOrder.ChannelHeightWidth:
            if includeChannel is True:
                # Equivalent to [:, self.y:self.y+self.h, self.x:self.x+self.w]
                return (
                    slice(None, None),
                    slice(self.y, self.y+self.h),
                    slice(self.x, self.x+self.w)
                )
            else:
                # Equivalent to [self.y:self.y+self.h, self.x:self.x+self.w]
                return (
                    slice(self.y, self.y+self.h),
                    slice(self.x, self.x+self.w)
                )
        else:
            # Bug fix: `Error` was undefined here, so this line previously
            # raised NameError instead of a meaningful exception.
            raise ValueError('Unsupported order of dimensions: ' + str(self.dimOrder))

    def __str__(self):
        return '(' + str(self.x) + ',' + str(self.y) + ',' + str(self.w) + ',' + str(self.h) + ')'

    def __repr__(self):
        return self.__str__()
def generate(data, dimOrder, maxWindowSize, overlapPercent, transforms = []):
    """
    Generates a set of sliding windows for the specified dataset.
    """
    # Locate the width and height axes via the dimension-order list, then
    # read the spatial extents straight off the data's shape tuple.
    wAxis = dimOrder.index('w')
    hAxis = dimOrder.index('h')
    return generateForSize(data.shape[wAxis], data.shape[hAxis], dimOrder,
                           maxWindowSize, overlapPercent, transforms)
def generateForSize(width, height, dimOrder, maxWindowSize, overlapPercent, transforms = None):
    """
    Generates a set of sliding windows for a dataset with the specified dimensions and order.

    Args:
        width: dataset width.
        height: dataset height.
        dimOrder: one of the DimOrder layouts, passed through to each window.
        maxWindowSize: upper bound on window edge length; clipped to the
            dataset size on each axis.
        overlapPercent: fraction (0.0-1.0) of each window that overlaps its
            neighbour.
        transforms: optional list of callables; every window position is
            emitted once untransformed and once per transform.

    Returns:
        list of SlidingWindow objects covering the full dataset.
    """
    # Idiom fix: avoid a shared mutable default argument.
    if transforms is None:
        transforms = []

    # If the input data is smaller than the specified window size,
    # clip the window size to the input size on both dimensions
    windowSizeX = min(maxWindowSize, width)
    windowSizeY = min(maxWindowSize, height)

    # Compute the window overlap and step size. The step is clamped to at
    # least 1 so a degenerate overlapPercent >= 1.0 cannot produce a zero
    # step (range() raises ValueError on a zero step).
    windowOverlapX = int(math.floor(windowSizeX * overlapPercent))
    windowOverlapY = int(math.floor(windowSizeY * overlapPercent))
    stepSizeX = max(1, windowSizeX - windowOverlapX)
    stepSizeY = max(1, windowSizeY - windowOverlapY)

    # Determine how many windows we will need in order to cover the input data
    lastX = width - windowSizeX
    lastY = height - windowSizeY
    xOffsets = list(range(0, lastX+1, stepSizeX))
    yOffsets = list(range(0, lastY+1, stepSizeY))

    # Unless the input data dimensions are exact multiples of the step size,
    # we will need one additional row and column of windows to get 100% coverage
    if len(xOffsets) == 0 or xOffsets[-1] != lastX:
        xOffsets.append(lastX)
    if len(yOffsets) == 0 or yOffsets[-1] != lastY:
        yOffsets.append(lastY)

    # Generate the list of windows
    windows = []
    for xOffset in xOffsets:
        for yOffset in yOffsets:
            for transform in [None] + transforms:
                windows.append(SlidingWindow(
                    x=xOffset,
                    y=yOffset,
                    w=windowSizeX,
                    h=windowSizeY,
                    dimOrder=dimOrder,
                    transform=transform
                ))

    return windows
|
adamrehn/slidingwindow
|
slidingwindow/SlidingWindow.py
|
generateForSize
|
python
|
def generateForSize(width, height, dimOrder, maxWindowSize, overlapPercent, transforms = None):
    """
    Generates a set of sliding windows for a dataset with the specified dimensions and order.

    Args:
        width: dataset width.
        height: dataset height.
        dimOrder: one of the DimOrder layouts, passed through to each window.
        maxWindowSize: upper bound on window edge length; clipped to the
            dataset size on each axis.
        overlapPercent: fraction (0.0-1.0) of each window that overlaps its
            neighbour.
        transforms: optional list of callables; every window position is
            emitted once untransformed and once per transform.

    Returns:
        list of SlidingWindow objects covering the full dataset.
    """
    # Idiom fix: avoid a shared mutable default argument.
    if transforms is None:
        transforms = []

    # If the input data is smaller than the specified window size,
    # clip the window size to the input size on both dimensions
    windowSizeX = min(maxWindowSize, width)
    windowSizeY = min(maxWindowSize, height)

    # Compute the window overlap and step size. The step is clamped to at
    # least 1 so a degenerate overlapPercent >= 1.0 cannot produce a zero
    # step (range() raises ValueError on a zero step).
    windowOverlapX = int(math.floor(windowSizeX * overlapPercent))
    windowOverlapY = int(math.floor(windowSizeY * overlapPercent))
    stepSizeX = max(1, windowSizeX - windowOverlapX)
    stepSizeY = max(1, windowSizeY - windowOverlapY)

    # Determine how many windows we will need in order to cover the input data
    lastX = width - windowSizeX
    lastY = height - windowSizeY
    xOffsets = list(range(0, lastX+1, stepSizeX))
    yOffsets = list(range(0, lastY+1, stepSizeY))

    # Unless the input data dimensions are exact multiples of the step size,
    # we will need one additional row and column of windows to get 100% coverage
    if len(xOffsets) == 0 or xOffsets[-1] != lastX:
        xOffsets.append(lastX)
    if len(yOffsets) == 0 or yOffsets[-1] != lastY:
        yOffsets.append(lastY)

    # Generate the list of windows
    windows = []
    for xOffset in xOffsets:
        for yOffset in yOffsets:
            for transform in [None] + transforms:
                windows.append(SlidingWindow(
                    x=xOffset,
                    y=yOffset,
                    w=windowSizeX,
                    h=windowSizeY,
                    dimOrder=dimOrder,
                    transform=transform
                ))

    return windows
|
Generates a set of sliding windows for a dataset with the specified dimensions and order.
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/SlidingWindow.py#L100-L143
| null |
import math
class DimOrder(object):
    """
    Represents the order of the dimensions in a dataset's shape.
    """
    # 'c' = channel, 'h' = height, 'w' = width.
    # Channels-first layout.
    ChannelHeightWidth = ['c', 'h', 'w']
    # Channels-last layout.
    HeightWidthChannel = ['h', 'w', 'c']
class SlidingWindow(object):
    """
    Represents a single window into a larger dataset.

    A window is an (x, y, w, h) rectangle plus the dimension order of the
    target dataset and an optional transform applied when slicing.
    """

    def __init__(self, x, y, w, h, dimOrder, transform = None):
        """
        Creates a new window with the specified dimensions and transform
        """
        self.x = x                    # left edge (column offset)
        self.y = y                    # top edge (row offset)
        self.w = w                    # window width
        self.h = h                    # window height
        self.dimOrder = dimOrder      # one of the DimOrder layouts
        self.transform = transform    # optional callable used by apply()

    def apply(self, matrix):
        """
        Slices the supplied matrix and applies any transform bound to this window
        """
        view = matrix[ self.indices() ]
        # Idiom fix: compare against None by identity, not equality.
        return self.transform(view) if self.transform is not None else view

    def getRect(self):
        """
        Returns the window bounds as a tuple of (x,y,w,h)
        """
        return (self.x, self.y, self.w, self.h)

    def setRect(self, rect):
        """
        Sets the window bounds from a tuple of (x,y,w,h)
        """
        self.x, self.y, self.w, self.h = rect

    def indices(self, includeChannel=True):
        """
        Retrieves the indices for this window as a tuple of slices

        Raises:
            ValueError: if ``self.dimOrder`` is not a recognised layout.
        """
        if self.dimOrder == DimOrder.HeightWidthChannel:
            # Equivalent to [self.y:self.y+self.h, self.x:self.x+self.w]
            return (
                slice(self.y, self.y+self.h),
                slice(self.x, self.x+self.w)
            )
        elif self.dimOrder == DimOrder.ChannelHeightWidth:
            if includeChannel is True:
                # Equivalent to [:, self.y:self.y+self.h, self.x:self.x+self.w]
                return (
                    slice(None, None),
                    slice(self.y, self.y+self.h),
                    slice(self.x, self.x+self.w)
                )
            else:
                # Equivalent to [self.y:self.y+self.h, self.x:self.x+self.w]
                return (
                    slice(self.y, self.y+self.h),
                    slice(self.x, self.x+self.w)
                )
        else:
            # Bug fix: `Error` was undefined here, so this line previously
            # raised NameError instead of a meaningful exception.
            raise ValueError('Unsupported order of dimensions: ' + str(self.dimOrder))

    def __str__(self):
        return '(' + str(self.x) + ',' + str(self.y) + ',' + str(self.w) + ',' + str(self.h) + ')'

    def __repr__(self):
        return self.__str__()
def generate(data, dimOrder, maxWindowSize, overlapPercent, transforms = []):
    """
    Generates a set of sliding windows for the specified dataset.
    """
    # Locate the width and height axes via the dimension-order list, then
    # read the spatial extents straight off the data's shape tuple.
    wAxis = dimOrder.index('w')
    hAxis = dimOrder.index('h')
    return generateForSize(data.shape[wAxis], data.shape[hAxis], dimOrder,
                           maxWindowSize, overlapPercent, transforms)
def generateForSize(width, height, dimOrder, maxWindowSize, overlapPercent, transforms = None):
    """
    Generates a set of sliding windows for a dataset with the specified dimensions and order.

    Args:
        width: dataset width.
        height: dataset height.
        dimOrder: one of the DimOrder layouts, passed through to each window.
        maxWindowSize: upper bound on window edge length; clipped to the
            dataset size on each axis.
        overlapPercent: fraction (0.0-1.0) of each window that overlaps its
            neighbour.
        transforms: optional list of callables; every window position is
            emitted once untransformed and once per transform.

    Returns:
        list of SlidingWindow objects covering the full dataset.
    """
    # Idiom fix: avoid a shared mutable default argument.
    if transforms is None:
        transforms = []

    # If the input data is smaller than the specified window size,
    # clip the window size to the input size on both dimensions
    windowSizeX = min(maxWindowSize, width)
    windowSizeY = min(maxWindowSize, height)

    # Compute the window overlap and step size. The step is clamped to at
    # least 1 so a degenerate overlapPercent >= 1.0 cannot produce a zero
    # step (range() raises ValueError on a zero step).
    windowOverlapX = int(math.floor(windowSizeX * overlapPercent))
    windowOverlapY = int(math.floor(windowSizeY * overlapPercent))
    stepSizeX = max(1, windowSizeX - windowOverlapX)
    stepSizeY = max(1, windowSizeY - windowOverlapY)

    # Determine how many windows we will need in order to cover the input data
    lastX = width - windowSizeX
    lastY = height - windowSizeY
    xOffsets = list(range(0, lastX+1, stepSizeX))
    yOffsets = list(range(0, lastY+1, stepSizeY))

    # Unless the input data dimensions are exact multiples of the step size,
    # we will need one additional row and column of windows to get 100% coverage
    if len(xOffsets) == 0 or xOffsets[-1] != lastX:
        xOffsets.append(lastX)
    if len(yOffsets) == 0 or yOffsets[-1] != lastY:
        yOffsets.append(lastY)

    # Generate the list of windows
    windows = []
    for xOffset in xOffsets:
        for yOffset in yOffsets:
            for transform in [None] + transforms:
                windows.append(SlidingWindow(
                    x=xOffset,
                    y=yOffset,
                    w=windowSizeX,
                    h=windowSizeY,
                    dimOrder=dimOrder,
                    transform=transform
                ))

    return windows
|
adamrehn/slidingwindow
|
slidingwindow/SlidingWindow.py
|
SlidingWindow.apply
|
python
|
def apply(self, matrix):
    """
    Slices the supplied matrix and applies any transform bound to this window
    """
    window_view = matrix[self.indices()]
    if self.transform != None:
        return self.transform(window_view)
    return window_view
|
Slices the supplied matrix and applies any transform bound to this window
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/SlidingWindow.py#L27-L32
|
[
"def indices(self, includeChannel=True):\n\t\"\"\"\n\tRetrieves the indices for this window as a tuple of slices\n\t\"\"\"\n\tif self.dimOrder == DimOrder.HeightWidthChannel:\n\n\t\t# Equivalent to [self.y:self.y+self.h+1, self.x:self.x+self.w+1]\n\t\treturn (\n\t\t\tslice(self.y, self.y+self.h),\n\t\t\tslice(self.x, self.x+self.w)\n\t\t)\n\n\telif self.dimOrder == DimOrder.ChannelHeightWidth:\n\n\t\tif includeChannel is True:\n\n\t\t\t# Equivalent to [:, self.y:self.y+self.h+1, self.x:self.x+self.w+1]\n\t\t\treturn (\n\t\t\t\tslice(None, None),\n\t\t\t\tslice(self.y, self.y+self.h),\n\t\t\t\tslice(self.x, self.x+self.w)\n\t\t\t)\n\n\t\telse:\n\n\t\t\t# Equivalent to [self.y:self.y+self.h+1, self.x:self.x+self.w+1]\n\t\t\treturn (\n\t\t\t\tslice(self.y, self.y+self.h),\n\t\t\t\tslice(self.x, self.x+self.w)\n\t\t\t)\n\n\telse:\n\t\traise Error('Unsupported order of dimensions: ' + str(self.dimOrder))\n"
] |
class SlidingWindow(object):
    """
    Represents a single window into a larger dataset.

    A window is an (x, y, w, h) rectangle plus the dimension order of the
    target dataset and an optional transform applied when slicing.
    """

    def __init__(self, x, y, w, h, dimOrder, transform = None):
        """
        Creates a new window with the specified dimensions and transform
        """
        self.x = x                    # left edge (column offset)
        self.y = y                    # top edge (row offset)
        self.w = w                    # window width
        self.h = h                    # window height
        self.dimOrder = dimOrder      # one of the DimOrder layouts
        self.transform = transform    # optional callable used by apply()

    def apply(self, matrix):
        """
        Slices the supplied matrix and applies any transform bound to this window
        """
        view = matrix[ self.indices() ]
        # Idiom fix: compare against None by identity, not equality.
        return self.transform(view) if self.transform is not None else view

    def getRect(self):
        """
        Returns the window bounds as a tuple of (x,y,w,h)
        """
        return (self.x, self.y, self.w, self.h)

    def setRect(self, rect):
        """
        Sets the window bounds from a tuple of (x,y,w,h)
        """
        self.x, self.y, self.w, self.h = rect

    def indices(self, includeChannel=True):
        """
        Retrieves the indices for this window as a tuple of slices

        Raises:
            ValueError: if ``self.dimOrder`` is not a recognised layout.
        """
        if self.dimOrder == DimOrder.HeightWidthChannel:
            # Equivalent to [self.y:self.y+self.h, self.x:self.x+self.w]
            return (
                slice(self.y, self.y+self.h),
                slice(self.x, self.x+self.w)
            )
        elif self.dimOrder == DimOrder.ChannelHeightWidth:
            if includeChannel is True:
                # Equivalent to [:, self.y:self.y+self.h, self.x:self.x+self.w]
                return (
                    slice(None, None),
                    slice(self.y, self.y+self.h),
                    slice(self.x, self.x+self.w)
                )
            else:
                # Equivalent to [self.y:self.y+self.h, self.x:self.x+self.w]
                return (
                    slice(self.y, self.y+self.h),
                    slice(self.x, self.x+self.w)
                )
        else:
            # Bug fix: `Error` was undefined here, so this line previously
            # raised NameError instead of a meaningful exception.
            raise ValueError('Unsupported order of dimensions: ' + str(self.dimOrder))

    def __str__(self):
        return '(' + str(self.x) + ',' + str(self.y) + ',' + str(self.w) + ',' + str(self.h) + ')'

    def __repr__(self):
        return self.__str__()
|
adamrehn/slidingwindow
|
slidingwindow/SlidingWindow.py
|
SlidingWindow.getRect
|
python
|
def getRect(self):
return (self.x, self.y, self.w, self.h)
|
Returns the window bounds as a tuple of (x,y,w,h)
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/SlidingWindow.py#L34-L38
| null |
class SlidingWindow(object):
    """
    Represents a single window into a larger dataset.
    """

    def __init__(self, x, y, w, h, dimOrder, transform=None):
        """
        Creates a new window with the specified dimensions and transform.

        `transform`, when supplied, is a callable applied to each view
        extracted by `apply()`.
        """
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        self.dimOrder = dimOrder
        self.transform = transform

    def apply(self, matrix):
        """
        Slices the supplied matrix and applies any transform bound to this window.
        """
        view = matrix[self.indices()]
        # Fix: use identity comparison for None (`!= None` invokes __eq__,
        # which arbitrary transform objects may override).
        return self.transform(view) if self.transform is not None else view

    def setRect(self, rect):
        """
        Sets the window bounds from a tuple of (x,y,w,h).
        """
        self.x, self.y, self.w, self.h = rect

    def indices(self, includeChannel=True):
        """
        Retrieves the indices for this window as a tuple of slices.
        """
        if self.dimOrder == DimOrder.HeightWidthChannel:
            # Equivalent to [self.y:self.y+self.h, self.x:self.x+self.w]
            return (
                slice(self.y, self.y + self.h),
                slice(self.x, self.x + self.w)
            )
        elif self.dimOrder == DimOrder.ChannelHeightWidth:
            if includeChannel is True:
                # Same slices, with a leading slice covering all channels.
                return (
                    slice(None, None),
                    slice(self.y, self.y + self.h),
                    slice(self.x, self.x + self.w)
                )
            else:
                return (
                    slice(self.y, self.y + self.h),
                    slice(self.x, self.x + self.w)
                )
        else:
            # NOTE(review): `Error` is not defined in the visible scope;
            # this raise produces a NameError in practice -- confirm upstream.
            raise Error('Unsupported order of dimensions: ' + str(self.dimOrder))

    def __str__(self):
        return '(' + str(self.x) + ',' + str(self.y) + ',' + str(self.w) + ',' + str(self.h) + ')'

    def __repr__(self):
        return self.__str__()
|
adamrehn/slidingwindow
|
slidingwindow/SlidingWindow.py
|
SlidingWindow.setRect
|
python
|
def setRect(self, rect):
self.x, self.y, self.w, self.h = rect
|
Sets the window bounds from a tuple of (x,y,w,h)
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/SlidingWindow.py#L40-L44
| null |
class SlidingWindow(object):
    """
    Represents a single window into a larger dataset.
    """
    def __init__(self, x, y, w, h, dimOrder, transform = None):
        """
        Creates a new window with the specified dimensions and transform

        x, y -- top-left corner of the window within the dataset
        w, h -- window width and height, in elements
        dimOrder -- dimension ordering of the dataset the window indexes into
        transform -- optional callable applied to views extracted by apply()
        """
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        self.dimOrder = dimOrder
        self.transform = transform
    def apply(self, matrix):
        """
        Slices the supplied matrix and applies any transform bound to this window
        """
        view = matrix[ self.indices() ]
        # An untransformed view is returned as-is (no copy is made here)
        return self.transform(view) if self.transform != None else view
    def getRect(self):
        """
        Returns the window bounds as a tuple of (x,y,w,h)
        """
        return (self.x, self.y, self.w, self.h)
    def indices(self, includeChannel=True):
        """
        Retrieves the indices for this window as a tuple of slices

        includeChannel -- for channel-first data, whether to prepend a
        slice covering all channels
        """
        if self.dimOrder == DimOrder.HeightWidthChannel:
            # Equivalent to [self.y:self.y+self.h+1, self.x:self.x+self.w+1]
            return (
                slice(self.y, self.y+self.h),
                slice(self.x, self.x+self.w)
            )
        elif self.dimOrder == DimOrder.ChannelHeightWidth:
            if includeChannel is True:
                # Equivalent to [:, self.y:self.y+self.h+1, self.x:self.x+self.w+1]
                return (
                    slice(None, None),
                    slice(self.y, self.y+self.h),
                    slice(self.x, self.x+self.w)
                )
            else:
                # Equivalent to [self.y:self.y+self.h+1, self.x:self.x+self.w+1]
                return (
                    slice(self.y, self.y+self.h),
                    slice(self.x, self.x+self.w)
                )
        else:
            # NOTE(review): `Error` is not defined in the visible scope;
            # this raise produces a NameError in practice -- confirm upstream.
            raise Error('Unsupported order of dimensions: ' + str(self.dimOrder))
    def __str__(self):
        return '(' + str(self.x) + ',' + str(self.y) + ',' + str(self.w) + ',' + str(self.h) + ')'
    def __repr__(self):
        return self.__str__()
|
adamrehn/slidingwindow
|
slidingwindow/SlidingWindow.py
|
SlidingWindow.indices
|
python
|
def indices(self, includeChannel=True):
if self.dimOrder == DimOrder.HeightWidthChannel:
# Equivalent to [self.y:self.y+self.h+1, self.x:self.x+self.w+1]
return (
slice(self.y, self.y+self.h),
slice(self.x, self.x+self.w)
)
elif self.dimOrder == DimOrder.ChannelHeightWidth:
if includeChannel is True:
# Equivalent to [:, self.y:self.y+self.h+1, self.x:self.x+self.w+1]
return (
slice(None, None),
slice(self.y, self.y+self.h),
slice(self.x, self.x+self.w)
)
else:
# Equivalent to [self.y:self.y+self.h+1, self.x:self.x+self.w+1]
return (
slice(self.y, self.y+self.h),
slice(self.x, self.x+self.w)
)
else:
raise Error('Unsupported order of dimensions: ' + str(self.dimOrder))
|
Retrieves the indices for this window as a tuple of slices
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/SlidingWindow.py#L46-L78
| null |
class SlidingWindow(object):
    """
    Represents a single window into a larger dataset.
    """
    def __init__(self, x, y, w, h, dimOrder, transform = None):
        """
        Creates a new window with the specified dimensions and transform

        x, y -- top-left corner of the window within the dataset
        w, h -- window width and height, in elements
        dimOrder -- dimension ordering of the dataset the window indexes into
        transform -- optional callable applied to views extracted by apply()
        """
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        self.dimOrder = dimOrder
        self.transform = transform
    def apply(self, matrix):
        """
        Slices the supplied matrix and applies any transform bound to this window
        """
        view = matrix[ self.indices() ]
        # An untransformed view is returned as-is (no copy is made here)
        return self.transform(view) if self.transform != None else view
    def getRect(self):
        """
        Returns the window bounds as a tuple of (x,y,w,h)
        """
        return (self.x, self.y, self.w, self.h)
    def setRect(self, rect):
        """
        Sets the window bounds from a tuple of (x,y,w,h)
        """
        self.x, self.y, self.w, self.h = rect
    def indices(self, includeChannel=True):
        """
        Retrieves the indices for this window as a tuple of slices

        includeChannel -- for channel-first data, whether to prepend a
        slice covering all channels
        """
        if self.dimOrder == DimOrder.HeightWidthChannel:
            # Equivalent to [self.y:self.y+self.h+1, self.x:self.x+self.w+1]
            return (
                slice(self.y, self.y+self.h),
                slice(self.x, self.x+self.w)
            )
        elif self.dimOrder == DimOrder.ChannelHeightWidth:
            if includeChannel is True:
                # Equivalent to [:, self.y:self.y+self.h+1, self.x:self.x+self.w+1]
                return (
                    slice(None, None),
                    slice(self.y, self.y+self.h),
                    slice(self.x, self.x+self.w)
                )
            else:
                # Equivalent to [self.y:self.y+self.h+1, self.x:self.x+self.w+1]
                return (
                    slice(self.y, self.y+self.h),
                    slice(self.x, self.x+self.w)
                )
        else:
            # NOTE(review): `Error` is not defined in the visible scope;
            # this raise produces a NameError in practice -- confirm upstream.
            raise Error('Unsupported order of dimensions: ' + str(self.dimOrder))
    def __str__(self):
        return '(' + str(self.x) + ',' + str(self.y) + ',' + str(self.w) + ',' + str(self.h) + ')'
    def __repr__(self):
        return self.__str__()
|
adamrehn/slidingwindow
|
slidingwindow/Batching.py
|
batchWindows
|
python
|
def batchWindows(windows, batchSize):
    """
    Splits a list of windows into a series of batches.

    The final batch may be smaller than `batchSize` when the number of
    windows is not an exact multiple of it.
    """
    # Fix: when batchSize exceeds the number of windows the original section
    # count `len(windows) // batchSize` is zero, which np.array_split rejects.
    # Fall back to a single batch containing everything.
    numBatches = max(1, len(windows) // batchSize)
    return np.array_split(np.array(windows), numBatches)
|
Splits a list of windows into a series of batches.
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/Batching.py#L3-L7
| null |
import numpy as np
|
adamrehn/slidingwindow
|
slidingwindow/WindowDistance.py
|
generateDistanceMatrix
|
python
|
def generateDistanceMatrix(width, height):
    """
    Generates a matrix specifying the distance of each point in a window to its centre.
    """
    # Determine the coordinates of the exact centre of the window
    originX = width / 2
    originY = height / 2

    # Generate the distance matrix.
    # Fix: `np.float` was removed in NumPy 1.24 -- the builtin `float`
    # requests the identical float64 dtype.
    distances = zerosFactory((height, width), dtype=float)
    for (y, x), _ in np.ndenumerate(distances):
        # hypot(a, b) == sqrt(a**2 + b**2), computed without overflow
        distances[y, x] = math.hypot(x - originX, y - originY)

    return distances
|
Generates a matrix specifying the distance of each point in a window to its centre.
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/WindowDistance.py#L5-L20
|
[
"def zerosFactory(shape, dtype=float):\n\t\"\"\"\n\tCreates a new NumPy array using `arrayFactory()` and fills it with zeros.\n\t\"\"\"\n\tarr = arrayFactory(shape=shape, dtype=dtype)\n\tarr.fill(0)\n\treturn arr\n"
] |
from .ArrayUtils import *
import numpy as np
import math
def generateDistanceMatrix(width, height):
    """
    Generates a matrix specifying the distance of each point in a window to its centre.
    """
    # Determine the coordinates of the exact centre of the window
    originX = width / 2
    originY = height / 2

    # Generate the distance matrix.
    # Fix: `np.float` was removed in NumPy 1.24 -- the builtin `float`
    # requests the identical float64 dtype.
    distances = zerosFactory((height, width), dtype=float)
    for (y, x), _ in np.ndenumerate(distances):
        # hypot(a, b) == sqrt(a**2 + b**2), computed without overflow
        distances[y, x] = math.hypot(x - originX, y - originY)

    return distances
|
adamrehn/slidingwindow
|
slidingwindow/RectangleUtils.py
|
cropRect
|
python
|
def cropRect(rect, cropTop, cropBottom, cropLeft, cropRight):
# Unpack the rectangle
x, y, w, h = rect
# Crop by the specified value
x += cropLeft
y += cropTop
w -= (cropLeft + cropRight)
h -= (cropTop + cropBottom)
# Re-pack the padded rect
return (x,y,w,h)
|
Crops a rectangle by the specified number of pixels on each side.
The input rectangle and return value are both a tuple of (x,y,w,h).
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/RectangleUtils.py#L4-L21
| null |
import numpy as np
import math
def cropRect(rect, cropTop, cropBottom, cropLeft, cropRight):
    """
    Crops a rectangle by the specified number of pixels on each side.

    The input rectangle and return value are both a tuple of (x,y,w,h).
    """
    x, y, w, h = rect
    # Shrink each side inward: the origin moves by the left/top crop, and
    # each dimension loses the sum of its two opposing crops.
    return (
        x + cropLeft,
        y + cropTop,
        w - cropLeft - cropRight,
        h - cropTop - cropBottom,
    )
def padRect(rect, padTop, padBottom, padLeft, padRight, bounds, clipExcess = True):
    """
    Pads a rectangle by the specified values on each individual side,
    ensuring the padded rectangle falls within the specified bounds.

    The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
    `bounds` is indexed as (height, width). When clipExcess is True, any part
    of the padded rectangle outside the bounds is clipped; otherwise the
    rectangle is adjusted to compensate where possible, with a final clipping
    pass as a last resort.
    """
    # Unpack the rectangle
    x, y, w, h = rect
    # Pad by the specified value
    x -= padLeft
    y -= padTop
    w += (padLeft + padRight)
    h += (padTop + padBottom)
    # Determine if we are clipping overflows/underflows or
    # shifting the centre of the rectangle to compensate
    if clipExcess == True:
        # Clip any underflows
        x = max(0, x)
        y = max(0, y)
        # Clip any overflows
        overflowY = max(0, (y + h) - bounds[0])
        overflowX = max(0, (x + w) - bounds[1])
        h -= overflowY
        w -= overflowX
    else:
        # Compensate for any underflows by shifting the origin right/down
        underflowX = max(0, 0 - x)
        underflowY = max(0, 0 - y)
        x += underflowX
        y += underflowY
        # Compensate for any overflows
        # NOTE(review): this moves the origin up/left AND grows the size by the
        # same amount, which keeps the far edge fixed but leaves the total
        # extent unchanged -- the recursive clip below is what actually
        # removes any remaining overflow. Verify this is the intended policy.
        overflowY = max(0, (y + h) - bounds[0])
        overflowX = max(0, (x + w) - bounds[1])
        x -= overflowX
        w += overflowX
        y -= overflowY
        h += overflowY
        # If there are still overflows or underflows after our
        # modifications, we have no choice but to clip them
        # (recursive call with zero padding and clipExcess=True)
        x, y, w, h = padRect((x,y,w,h), 0, 0, 0, 0, bounds, True)
    # Re-pack the padded rect
    return (x,y,w,h)
def cropRectEqually(rect, cropping):
    """
    Crops a rectangle by the specified number of pixels on all sides.

    The input rectangle and return value are both a tuple of (x,y,w,h).
    """
    # Same arithmetic as cropRect(rect, c, c, c, c), written directly:
    # the origin shifts inward by `cropping` and each dimension shrinks
    # by twice that amount.
    x, y, w, h = rect
    shrink = 2 * cropping
    return (x + cropping, y + cropping, w - shrink, h - shrink)
def padRectEqually(rect, padding, bounds, clipExcess = True):
    """
    Applies equal padding to all sides of a rectangle,
    ensuring the padded rectangle falls within the specified bounds.

    The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
    """
    # Delegate to padRect with the same padding on every side.
    return padRect(
        rect,
        padTop=padding,
        padBottom=padding,
        padLeft=padding,
        padRight=padding,
        bounds=bounds,
        clipExcess=clipExcess,
    )
def squareAspect(rect):
    """
    Crops either the width or height, as necessary, to make a rectangle into a square.

    The input rectangle and return value are both a tuple of (x,y,w,h).
    """
    x, y, w, h = rect
    # Half the excess is trimmed from each of the two opposing sides
    # (same arithmetic as cropRect with equal left/right or top/bottom crops).
    excess = abs(w - h) // 2
    if w > h:
        return (x + excess, y, w - 2 * excess, h)
    if w < h:
        return (x, y + excess, w, h - 2 * excess)
    # Already a square
    return rect
def fitToSize(rect, targetWidth, targetHeight, bounds):
    """
    Pads or crops a rectangle as necessary to achieve the target dimensions,
    ensuring the modified rectangle falls within the specified bounds.

    The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
    `bounds` is indexed as (height, width). Each axis is handled independently:
    an oversized dimension is cropped symmetrically, an undersized one is
    padded symmetrically (shifting rather than clipping when it would exceed
    the bounds, via padRect's clipExcess=False mode).
    """
    # Determine the difference between the current size and target size
    x,y,w,h = rect
    diffX = w - targetWidth
    diffY = h - targetHeight
    # Determine if we are cropping or padding the width
    if diffX > 0:
        # Split the excess as evenly as possible; odd excess crops one
        # extra pixel from the right
        cropLeft = math.floor(diffX / 2)
        cropRight = diffX - cropLeft
        x,y,w,h = cropRect((x,y,w,h), 0, 0, cropLeft, cropRight)
    elif diffX < 0:
        # Odd deficits pad one extra pixel on the right
        padLeft = math.floor(abs(diffX) / 2)
        padRight = abs(diffX) - padLeft
        x,y,w,h = padRect((x,y,w,h), 0, 0, padLeft, padRight, bounds, False)
    # Determine if we are cropping or padding the height
    if diffY > 0:
        # Odd excess crops one extra pixel from the bottom
        cropTop = math.floor(diffY / 2)
        cropBottom = diffY - cropTop
        x,y,w,h = cropRect((x,y,w,h), cropTop, cropBottom, 0, 0)
    elif diffY < 0:
        # Odd deficits pad one extra pixel on the bottom
        padTop = math.floor(abs(diffY) / 2)
        padBottom = abs(diffY) - padTop
        x,y,w,h = padRect((x,y,w,h), padTop, padBottom, 0, 0, bounds, False)
    return (x,y,w,h)
|
adamrehn/slidingwindow
|
slidingwindow/RectangleUtils.py
|
padRect
|
python
|
def padRect(rect, padTop, padBottom, padLeft, padRight, bounds, clipExcess = True):
# Unpack the rectangle
x, y, w, h = rect
# Pad by the specified value
x -= padLeft
y -= padTop
w += (padLeft + padRight)
h += (padTop + padBottom)
# Determine if we are clipping overflows/underflows or
# shifting the centre of the rectangle to compensate
if clipExcess == True:
# Clip any underflows
x = max(0, x)
y = max(0, y)
# Clip any overflows
overflowY = max(0, (y + h) - bounds[0])
overflowX = max(0, (x + w) - bounds[1])
h -= overflowY
w -= overflowX
else:
# Compensate for any underflows
underflowX = max(0, 0 - x)
underflowY = max(0, 0 - y)
x += underflowX
y += underflowY
# Compensate for any overflows
overflowY = max(0, (y + h) - bounds[0])
overflowX = max(0, (x + w) - bounds[1])
x -= overflowX
w += overflowX
y -= overflowY
h += overflowY
# If there are still overflows or underflows after our
# modifications, we have no choice but to clip them
x, y, w, h = padRect((x,y,w,h), 0, 0, 0, 0, bounds, True)
# Re-pack the padded rect
return (x,y,w,h)
|
Pads a rectangle by the specified values on each individual side,
ensuring the padded rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/RectangleUtils.py#L24-L76
|
[
"def padRect(rect, padTop, padBottom, padLeft, padRight, bounds, clipExcess = True):\n\t\"\"\"\n\tPads a rectangle by the specified values on each individual side,\n\tensuring the padded rectangle falls within the specified bounds.\n\n\tThe input rectangle, bounds, and return value are all a tuple of (x,y,w,h).\n\t\"\"\"\n\n\t# Unpack the rectangle\n\tx, y, w, h = rect\n\n\t# Pad by the specified value\n\tx -= padLeft\n\ty -= padTop\n\tw += (padLeft + padRight)\n\th += (padTop + padBottom)\n\n\t# Determine if we are clipping overflows/underflows or\n\t# shifting the centre of the rectangle to compensate\n\tif clipExcess == True:\n\n\t\t# Clip any underflows\n\t\tx = max(0, x)\n\t\ty = max(0, y)\n\n\t\t# Clip any overflows\n\t\toverflowY = max(0, (y + h) - bounds[0])\n\t\toverflowX = max(0, (x + w) - bounds[1])\n\t\th -= overflowY\n\t\tw -= overflowX\n\n\telse:\n\n\t\t# Compensate for any underflows\n\t\tunderflowX = max(0, 0 - x)\n\t\tunderflowY = max(0, 0 - y)\n\t\tx += underflowX\n\t\ty += underflowY\n\n\t\t# Compensate for any overflows\n\t\toverflowY = max(0, (y + h) - bounds[0])\n\t\toverflowX = max(0, (x + w) - bounds[1])\n\t\tx -= overflowX\n\t\tw += overflowX\n\t\ty -= overflowY\n\t\th += overflowY\n\n\t\t# If there are still overflows or underflows after our\n\t\t# modifications, we have no choice but to clip them\n\t\tx, y, w, h = padRect((x,y,w,h), 0, 0, 0, 0, bounds, True)\n\n\t# Re-pack the padded rect\n\treturn (x,y,w,h)\n"
] |
import numpy as np
import math
def cropRect(rect, cropTop, cropBottom, cropLeft, cropRight):
"""
Crops a rectangle by the specified number of pixels on each side.
The input rectangle and return value are both a tuple of (x,y,w,h).
"""
# Unpack the rectangle
x, y, w, h = rect
# Crop by the specified value
x += cropLeft
y += cropTop
w -= (cropLeft + cropRight)
h -= (cropTop + cropBottom)
# Re-pack the padded rect
return (x,y,w,h)
def padRect(rect, padTop, padBottom, padLeft, padRight, bounds, clipExcess = True):
"""
Pads a rectangle by the specified values on each individual side,
ensuring the padded rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
"""
# Unpack the rectangle
x, y, w, h = rect
# Pad by the specified value
x -= padLeft
y -= padTop
w += (padLeft + padRight)
h += (padTop + padBottom)
# Determine if we are clipping overflows/underflows or
# shifting the centre of the rectangle to compensate
if clipExcess == True:
# Clip any underflows
x = max(0, x)
y = max(0, y)
# Clip any overflows
overflowY = max(0, (y + h) - bounds[0])
overflowX = max(0, (x + w) - bounds[1])
h -= overflowY
w -= overflowX
else:
# Compensate for any underflows
underflowX = max(0, 0 - x)
underflowY = max(0, 0 - y)
x += underflowX
y += underflowY
# Compensate for any overflows
overflowY = max(0, (y + h) - bounds[0])
overflowX = max(0, (x + w) - bounds[1])
x -= overflowX
w += overflowX
y -= overflowY
h += overflowY
# If there are still overflows or underflows after our
# modifications, we have no choice but to clip them
x, y, w, h = padRect((x,y,w,h), 0, 0, 0, 0, bounds, True)
# Re-pack the padded rect
return (x,y,w,h)
def cropRectEqually(rect, cropping):
"""
Crops a rectangle by the specified number of pixels on all sides.
The input rectangle and return value are both a tuple of (x,y,w,h).
"""
return cropRect(rect, cropping, cropping, cropping, cropping)
def padRectEqually(rect, padding, bounds, clipExcess = True):
"""
Applies equal padding to all sides of a rectangle,
ensuring the padded rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
"""
return padRect(rect, padding, padding, padding, padding, bounds, clipExcess)
def squareAspect(rect):
"""
Crops either the width or height, as necessary, to make a rectangle into a square.
The input rectangle and return value are both a tuple of (x,y,w,h).
"""
# Determine which dimension needs to be cropped
x,y,w,h = rect
if w > h:
cropX = (w - h) // 2
return cropRect(rect, 0, 0, cropX, cropX)
elif w < h:
cropY = (h - w) // 2
return cropRect(rect, cropY, cropY, 0, 0)
# Already a square
return rect
def fitToSize(rect, targetWidth, targetHeight, bounds):
"""
Pads or crops a rectangle as necessary to achieve the target dimensions,
ensuring the modified rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
"""
# Determine the difference between the current size and target size
x,y,w,h = rect
diffX = w - targetWidth
diffY = h - targetHeight
# Determine if we are cropping or padding the width
if diffX > 0:
cropLeft = math.floor(diffX / 2)
cropRight = diffX - cropLeft
x,y,w,h = cropRect((x,y,w,h), 0, 0, cropLeft, cropRight)
elif diffX < 0:
padLeft = math.floor(abs(diffX) / 2)
padRight = abs(diffX) - padLeft
x,y,w,h = padRect((x,y,w,h), 0, 0, padLeft, padRight, bounds, False)
# Determine if we are cropping or padding the height
if diffY > 0:
cropTop = math.floor(diffY / 2)
cropBottom = diffY - cropTop
x,y,w,h = cropRect((x,y,w,h), cropTop, cropBottom, 0, 0)
elif diffY < 0:
padTop = math.floor(abs(diffY) / 2)
padBottom = abs(diffY) - padTop
x,y,w,h = padRect((x,y,w,h), padTop, padBottom, 0, 0, bounds, False)
return (x,y,w,h)
|
adamrehn/slidingwindow
|
slidingwindow/RectangleUtils.py
|
padRectEqually
|
python
|
def padRectEqually(rect, padding, bounds, clipExcess = True):
return padRect(rect, padding, padding, padding, padding, bounds, clipExcess)
|
Applies equal padding to all sides of a rectangle,
ensuring the padded rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/RectangleUtils.py#L88-L95
|
[
"def padRect(rect, padTop, padBottom, padLeft, padRight, bounds, clipExcess = True):\n\t\"\"\"\n\tPads a rectangle by the specified values on each individual side,\n\tensuring the padded rectangle falls within the specified bounds.\n\n\tThe input rectangle, bounds, and return value are all a tuple of (x,y,w,h).\n\t\"\"\"\n\n\t# Unpack the rectangle\n\tx, y, w, h = rect\n\n\t# Pad by the specified value\n\tx -= padLeft\n\ty -= padTop\n\tw += (padLeft + padRight)\n\th += (padTop + padBottom)\n\n\t# Determine if we are clipping overflows/underflows or\n\t# shifting the centre of the rectangle to compensate\n\tif clipExcess == True:\n\n\t\t# Clip any underflows\n\t\tx = max(0, x)\n\t\ty = max(0, y)\n\n\t\t# Clip any overflows\n\t\toverflowY = max(0, (y + h) - bounds[0])\n\t\toverflowX = max(0, (x + w) - bounds[1])\n\t\th -= overflowY\n\t\tw -= overflowX\n\n\telse:\n\n\t\t# Compensate for any underflows\n\t\tunderflowX = max(0, 0 - x)\n\t\tunderflowY = max(0, 0 - y)\n\t\tx += underflowX\n\t\ty += underflowY\n\n\t\t# Compensate for any overflows\n\t\toverflowY = max(0, (y + h) - bounds[0])\n\t\toverflowX = max(0, (x + w) - bounds[1])\n\t\tx -= overflowX\n\t\tw += overflowX\n\t\ty -= overflowY\n\t\th += overflowY\n\n\t\t# If there are still overflows or underflows after our\n\t\t# modifications, we have no choice but to clip them\n\t\tx, y, w, h = padRect((x,y,w,h), 0, 0, 0, 0, bounds, True)\n\n\t# Re-pack the padded rect\n\treturn (x,y,w,h)\n"
] |
import numpy as np
import math
def cropRect(rect, cropTop, cropBottom, cropLeft, cropRight):
"""
Crops a rectangle by the specified number of pixels on each side.
The input rectangle and return value are both a tuple of (x,y,w,h).
"""
# Unpack the rectangle
x, y, w, h = rect
# Crop by the specified value
x += cropLeft
y += cropTop
w -= (cropLeft + cropRight)
h -= (cropTop + cropBottom)
# Re-pack the padded rect
return (x,y,w,h)
def padRect(rect, padTop, padBottom, padLeft, padRight, bounds, clipExcess = True):
"""
Pads a rectangle by the specified values on each individual side,
ensuring the padded rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
"""
# Unpack the rectangle
x, y, w, h = rect
# Pad by the specified value
x -= padLeft
y -= padTop
w += (padLeft + padRight)
h += (padTop + padBottom)
# Determine if we are clipping overflows/underflows or
# shifting the centre of the rectangle to compensate
if clipExcess == True:
# Clip any underflows
x = max(0, x)
y = max(0, y)
# Clip any overflows
overflowY = max(0, (y + h) - bounds[0])
overflowX = max(0, (x + w) - bounds[1])
h -= overflowY
w -= overflowX
else:
# Compensate for any underflows
underflowX = max(0, 0 - x)
underflowY = max(0, 0 - y)
x += underflowX
y += underflowY
# Compensate for any overflows
overflowY = max(0, (y + h) - bounds[0])
overflowX = max(0, (x + w) - bounds[1])
x -= overflowX
w += overflowX
y -= overflowY
h += overflowY
# If there are still overflows or underflows after our
# modifications, we have no choice but to clip them
x, y, w, h = padRect((x,y,w,h), 0, 0, 0, 0, bounds, True)
# Re-pack the padded rect
return (x,y,w,h)
def cropRectEqually(rect, cropping):
"""
Crops a rectangle by the specified number of pixels on all sides.
The input rectangle and return value are both a tuple of (x,y,w,h).
"""
return cropRect(rect, cropping, cropping, cropping, cropping)
def padRectEqually(rect, padding, bounds, clipExcess = True):
"""
Applies equal padding to all sides of a rectangle,
ensuring the padded rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
"""
return padRect(rect, padding, padding, padding, padding, bounds, clipExcess)
def squareAspect(rect):
"""
Crops either the width or height, as necessary, to make a rectangle into a square.
The input rectangle and return value are both a tuple of (x,y,w,h).
"""
# Determine which dimension needs to be cropped
x,y,w,h = rect
if w > h:
cropX = (w - h) // 2
return cropRect(rect, 0, 0, cropX, cropX)
elif w < h:
cropY = (h - w) // 2
return cropRect(rect, cropY, cropY, 0, 0)
# Already a square
return rect
def fitToSize(rect, targetWidth, targetHeight, bounds):
"""
Pads or crops a rectangle as necessary to achieve the target dimensions,
ensuring the modified rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
"""
# Determine the difference between the current size and target size
x,y,w,h = rect
diffX = w - targetWidth
diffY = h - targetHeight
# Determine if we are cropping or padding the width
if diffX > 0:
cropLeft = math.floor(diffX / 2)
cropRight = diffX - cropLeft
x,y,w,h = cropRect((x,y,w,h), 0, 0, cropLeft, cropRight)
elif diffX < 0:
padLeft = math.floor(abs(diffX) / 2)
padRight = abs(diffX) - padLeft
x,y,w,h = padRect((x,y,w,h), 0, 0, padLeft, padRight, bounds, False)
# Determine if we are cropping or padding the height
if diffY > 0:
cropTop = math.floor(diffY / 2)
cropBottom = diffY - cropTop
x,y,w,h = cropRect((x,y,w,h), cropTop, cropBottom, 0, 0)
elif diffY < 0:
padTop = math.floor(abs(diffY) / 2)
padBottom = abs(diffY) - padTop
x,y,w,h = padRect((x,y,w,h), padTop, padBottom, 0, 0, bounds, False)
return (x,y,w,h)
|
adamrehn/slidingwindow
|
slidingwindow/RectangleUtils.py
|
squareAspect
|
python
|
def squareAspect(rect):
# Determine which dimension needs to be cropped
x,y,w,h = rect
if w > h:
cropX = (w - h) // 2
return cropRect(rect, 0, 0, cropX, cropX)
elif w < h:
cropY = (h - w) // 2
return cropRect(rect, cropY, cropY, 0, 0)
# Already a square
return rect
|
Crops either the width or height, as necessary, to make a rectangle into a square.
The input rectangle and return value are both a tuple of (x,y,w,h).
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/RectangleUtils.py#L98-L115
|
[
"def cropRect(rect, cropTop, cropBottom, cropLeft, cropRight):\n\t\"\"\"\n\tCrops a rectangle by the specified number of pixels on each side.\n\n\tThe input rectangle and return value are both a tuple of (x,y,w,h).\n\t\"\"\"\n\n\t# Unpack the rectangle\n\tx, y, w, h = rect\n\n\t# Crop by the specified value\n\tx += cropLeft\n\ty += cropTop\n\tw -= (cropLeft + cropRight)\n\th -= (cropTop + cropBottom)\n\n\t# Re-pack the padded rect\n\treturn (x,y,w,h)\n"
] |
import numpy as np
import math
def cropRect(rect, cropTop, cropBottom, cropLeft, cropRight):
"""
Crops a rectangle by the specified number of pixels on each side.
The input rectangle and return value are both a tuple of (x,y,w,h).
"""
# Unpack the rectangle
x, y, w, h = rect
# Crop by the specified value
x += cropLeft
y += cropTop
w -= (cropLeft + cropRight)
h -= (cropTop + cropBottom)
# Re-pack the padded rect
return (x,y,w,h)
def padRect(rect, padTop, padBottom, padLeft, padRight, bounds, clipExcess = True):
"""
Pads a rectangle by the specified values on each individual side,
ensuring the padded rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
"""
# Unpack the rectangle
x, y, w, h = rect
# Pad by the specified value
x -= padLeft
y -= padTop
w += (padLeft + padRight)
h += (padTop + padBottom)
# Determine if we are clipping overflows/underflows or
# shifting the centre of the rectangle to compensate
if clipExcess == True:
# Clip any underflows
x = max(0, x)
y = max(0, y)
# Clip any overflows
overflowY = max(0, (y + h) - bounds[0])
overflowX = max(0, (x + w) - bounds[1])
h -= overflowY
w -= overflowX
else:
# Compensate for any underflows
underflowX = max(0, 0 - x)
underflowY = max(0, 0 - y)
x += underflowX
y += underflowY
# Compensate for any overflows
overflowY = max(0, (y + h) - bounds[0])
overflowX = max(0, (x + w) - bounds[1])
x -= overflowX
w += overflowX
y -= overflowY
h += overflowY
# If there are still overflows or underflows after our
# modifications, we have no choice but to clip them
x, y, w, h = padRect((x,y,w,h), 0, 0, 0, 0, bounds, True)
# Re-pack the padded rect
return (x,y,w,h)
def cropRectEqually(rect, cropping):
"""
Crops a rectangle by the specified number of pixels on all sides.
The input rectangle and return value are both a tuple of (x,y,w,h).
"""
return cropRect(rect, cropping, cropping, cropping, cropping)
def padRectEqually(rect, padding, bounds, clipExcess = True):
"""
Applies equal padding to all sides of a rectangle,
ensuring the padded rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
"""
return padRect(rect, padding, padding, padding, padding, bounds, clipExcess)
def squareAspect(rect):
"""
Crops either the width or height, as necessary, to make a rectangle into a square.
The input rectangle and return value are both a tuple of (x,y,w,h).
"""
# Determine which dimension needs to be cropped
x,y,w,h = rect
if w > h:
cropX = (w - h) // 2
return cropRect(rect, 0, 0, cropX, cropX)
elif w < h:
cropY = (h - w) // 2
return cropRect(rect, cropY, cropY, 0, 0)
# Already a square
return rect
def fitToSize(rect, targetWidth, targetHeight, bounds):
"""
Pads or crops a rectangle as necessary to achieve the target dimensions,
ensuring the modified rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
"""
# Determine the difference between the current size and target size
x,y,w,h = rect
diffX = w - targetWidth
diffY = h - targetHeight
# Determine if we are cropping or padding the width
if diffX > 0:
cropLeft = math.floor(diffX / 2)
cropRight = diffX - cropLeft
x,y,w,h = cropRect((x,y,w,h), 0, 0, cropLeft, cropRight)
elif diffX < 0:
padLeft = math.floor(abs(diffX) / 2)
padRight = abs(diffX) - padLeft
x,y,w,h = padRect((x,y,w,h), 0, 0, padLeft, padRight, bounds, False)
# Determine if we are cropping or padding the height
if diffY > 0:
cropTop = math.floor(diffY / 2)
cropBottom = diffY - cropTop
x,y,w,h = cropRect((x,y,w,h), cropTop, cropBottom, 0, 0)
elif diffY < 0:
padTop = math.floor(abs(diffY) / 2)
padBottom = abs(diffY) - padTop
x,y,w,h = padRect((x,y,w,h), padTop, padBottom, 0, 0, bounds, False)
return (x,y,w,h)
|
adamrehn/slidingwindow
|
slidingwindow/RectangleUtils.py
|
fitToSize
|
python
|
def fitToSize(rect, targetWidth, targetHeight, bounds):
# Determine the difference between the current size and target size
x,y,w,h = rect
diffX = w - targetWidth
diffY = h - targetHeight
# Determine if we are cropping or padding the width
if diffX > 0:
cropLeft = math.floor(diffX / 2)
cropRight = diffX - cropLeft
x,y,w,h = cropRect((x,y,w,h), 0, 0, cropLeft, cropRight)
elif diffX < 0:
padLeft = math.floor(abs(diffX) / 2)
padRight = abs(diffX) - padLeft
x,y,w,h = padRect((x,y,w,h), 0, 0, padLeft, padRight, bounds, False)
# Determine if we are cropping or padding the height
if diffY > 0:
cropTop = math.floor(diffY / 2)
cropBottom = diffY - cropTop
x,y,w,h = cropRect((x,y,w,h), cropTop, cropBottom, 0, 0)
elif diffY < 0:
padTop = math.floor(abs(diffY) / 2)
padBottom = abs(diffY) - padTop
x,y,w,h = padRect((x,y,w,h), padTop, padBottom, 0, 0, bounds, False)
return (x,y,w,h)
|
Pads or crops a rectangle as necessary to achieve the target dimensions,
ensuring the modified rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/RectangleUtils.py#L118-L151
|
[
"def cropRect(rect, cropTop, cropBottom, cropLeft, cropRight):\n\t\"\"\"\n\tCrops a rectangle by the specified number of pixels on each side.\n\n\tThe input rectangle and return value are both a tuple of (x,y,w,h).\n\t\"\"\"\n\n\t# Unpack the rectangle\n\tx, y, w, h = rect\n\n\t# Crop by the specified value\n\tx += cropLeft\n\ty += cropTop\n\tw -= (cropLeft + cropRight)\n\th -= (cropTop + cropBottom)\n\n\t# Re-pack the padded rect\n\treturn (x,y,w,h)\n",
"def padRect(rect, padTop, padBottom, padLeft, padRight, bounds, clipExcess = True):\n\t\"\"\"\n\tPads a rectangle by the specified values on each individual side,\n\tensuring the padded rectangle falls within the specified bounds.\n\n\tThe input rectangle, bounds, and return value are all a tuple of (x,y,w,h).\n\t\"\"\"\n\n\t# Unpack the rectangle\n\tx, y, w, h = rect\n\n\t# Pad by the specified value\n\tx -= padLeft\n\ty -= padTop\n\tw += (padLeft + padRight)\n\th += (padTop + padBottom)\n\n\t# Determine if we are clipping overflows/underflows or\n\t# shifting the centre of the rectangle to compensate\n\tif clipExcess == True:\n\n\t\t# Clip any underflows\n\t\tx = max(0, x)\n\t\ty = max(0, y)\n\n\t\t# Clip any overflows\n\t\toverflowY = max(0, (y + h) - bounds[0])\n\t\toverflowX = max(0, (x + w) - bounds[1])\n\t\th -= overflowY\n\t\tw -= overflowX\n\n\telse:\n\n\t\t# Compensate for any underflows\n\t\tunderflowX = max(0, 0 - x)\n\t\tunderflowY = max(0, 0 - y)\n\t\tx += underflowX\n\t\ty += underflowY\n\n\t\t# Compensate for any overflows\n\t\toverflowY = max(0, (y + h) - bounds[0])\n\t\toverflowX = max(0, (x + w) - bounds[1])\n\t\tx -= overflowX\n\t\tw += overflowX\n\t\ty -= overflowY\n\t\th += overflowY\n\n\t\t# If there are still overflows or underflows after our\n\t\t# modifications, we have no choice but to clip them\n\t\tx, y, w, h = padRect((x,y,w,h), 0, 0, 0, 0, bounds, True)\n\n\t# Re-pack the padded rect\n\treturn (x,y,w,h)\n"
] |
import numpy as np
import math
def cropRect(rect, cropTop, cropBottom, cropLeft, cropRight):
"""
Crops a rectangle by the specified number of pixels on each side.
The input rectangle and return value are both a tuple of (x,y,w,h).
"""
# Unpack the rectangle
x, y, w, h = rect
# Crop by the specified value
x += cropLeft
y += cropTop
w -= (cropLeft + cropRight)
h -= (cropTop + cropBottom)
# Re-pack the padded rect
return (x,y,w,h)
def padRect(rect, padTop, padBottom, padLeft, padRight, bounds, clipExcess = True):
"""
Pads a rectangle by the specified values on each individual side,
ensuring the padded rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
"""
# Unpack the rectangle
x, y, w, h = rect
# Pad by the specified value
x -= padLeft
y -= padTop
w += (padLeft + padRight)
h += (padTop + padBottom)
# Determine if we are clipping overflows/underflows or
# shifting the centre of the rectangle to compensate
if clipExcess == True:
# Clip any underflows
x = max(0, x)
y = max(0, y)
# Clip any overflows
overflowY = max(0, (y + h) - bounds[0])
overflowX = max(0, (x + w) - bounds[1])
h -= overflowY
w -= overflowX
else:
# Compensate for any underflows
underflowX = max(0, 0 - x)
underflowY = max(0, 0 - y)
x += underflowX
y += underflowY
# Compensate for any overflows
overflowY = max(0, (y + h) - bounds[0])
overflowX = max(0, (x + w) - bounds[1])
x -= overflowX
w += overflowX
y -= overflowY
h += overflowY
# If there are still overflows or underflows after our
# modifications, we have no choice but to clip them
x, y, w, h = padRect((x,y,w,h), 0, 0, 0, 0, bounds, True)
# Re-pack the padded rect
return (x,y,w,h)
def cropRectEqually(rect, cropping):
"""
Crops a rectangle by the specified number of pixels on all sides.
The input rectangle and return value are both a tuple of (x,y,w,h).
"""
return cropRect(rect, cropping, cropping, cropping, cropping)
def padRectEqually(rect, padding, bounds, clipExcess = True):
"""
Applies equal padding to all sides of a rectangle,
ensuring the padded rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
"""
return padRect(rect, padding, padding, padding, padding, bounds, clipExcess)
def squareAspect(rect):
"""
Crops either the width or height, as necessary, to make a rectangle into a square.
The input rectangle and return value are both a tuple of (x,y,w,h).
"""
# Determine which dimension needs to be cropped
x,y,w,h = rect
if w > h:
cropX = (w - h) // 2
return cropRect(rect, 0, 0, cropX, cropX)
elif w < h:
cropY = (h - w) // 2
return cropRect(rect, cropY, cropY, 0, 0)
# Already a square
return rect
def fitToSize(rect, targetWidth, targetHeight, bounds):
"""
Pads or crops a rectangle as necessary to achieve the target dimensions,
ensuring the modified rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
"""
# Determine the difference between the current size and target size
x,y,w,h = rect
diffX = w - targetWidth
diffY = h - targetHeight
# Determine if we are cropping or padding the width
if diffX > 0:
cropLeft = math.floor(diffX / 2)
cropRight = diffX - cropLeft
x,y,w,h = cropRect((x,y,w,h), 0, 0, cropLeft, cropRight)
elif diffX < 0:
padLeft = math.floor(abs(diffX) / 2)
padRight = abs(diffX) - padLeft
x,y,w,h = padRect((x,y,w,h), 0, 0, padLeft, padRight, bounds, False)
# Determine if we are cropping or padding the height
if diffY > 0:
cropTop = math.floor(diffY / 2)
cropBottom = diffY - cropTop
x,y,w,h = cropRect((x,y,w,h), cropTop, cropBottom, 0, 0)
elif diffY < 0:
padTop = math.floor(abs(diffY) / 2)
padBottom = abs(diffY) - padTop
x,y,w,h = padRect((x,y,w,h), padTop, padBottom, 0, 0, bounds, False)
return (x,y,w,h)
|
adamrehn/slidingwindow
|
slidingwindow/Merging.py
|
mergeWindows
|
python
|
def mergeWindows(data, dimOrder, maxWindowSize, overlapPercent, batchSize, transform, progressCallback = None):
# Determine the dimensions of the input data
sourceWidth = data.shape[dimOrder.index('w')]
sourceHeight = data.shape[dimOrder.index('h')]
# Generate the sliding windows and group them into batches
windows = generate(data, dimOrder, maxWindowSize, overlapPercent)
batches = batchWindows(windows, batchSize)
# Apply the transform to the first batch of windows and determine the result dimensionality
exemplarResult = transform(data, batches[0])
resultDimensions = exemplarResult.shape[ len(exemplarResult.shape) - 1 ]
# Create the matrices to hold the sums and counts for the transform result values
sums = np.zeros((sourceHeight, sourceWidth, resultDimensions), dtype=np.float)
counts = np.zeros((sourceHeight, sourceWidth), dtype=np.uint32)
# Iterate over the batches and apply the transformation function to each batch
for batchNum, batch in enumerate(batches):
# If a progress callback was supplied, call it
if progressCallback != None:
progressCallback(batchNum, len(batches))
# Apply the transformation function to the current batch
batchResult = transform(data, batch)
# Iterate over the windows in the batch and update the sums matrix
for windowNum, window in enumerate(batch):
# Create views into the larger matrices that correspond to the current window
windowIndices = window.indices(False)
sumsView = sums[windowIndices]
countsView = counts[windowIndices]
# Update the result sums for each of the dataset elements in the window
sumsView[:] += batchResult[windowNum]
countsView[:] += 1
# Use the sums and the counts to compute the mean values
for dim in range(0, resultDimensions):
sums[:,:,dim] /= counts
# Return the mean values
return sums
|
Generates sliding windows for the specified dataset and applies the specified
transformation function to each window. Where multiple overlapping windows
include an element of the input dataset, the overlap is resolved by computing
the mean transform result value for that element.
Irrespective of the order of the dimensions of the input dataset, the
transformation function should return a NumPy array with dimensions
[batch, height, width, resultChannels].
If a progress callback is supplied, it will be called immediately before
applying the transformation function to each batch of windows. The callback
should accept the current batch index and number of batches as arguments.
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/Merging.py#L5-L64
|
[
"def batchWindows(windows, batchSize):\n\t\"\"\"\n\tSplits a list of windows into a series of batches.\n\t\"\"\"\n\treturn np.array_split(np.array(windows), len(windows) // batchSize)\n",
"def generate(data, dimOrder, maxWindowSize, overlapPercent, transforms = []):\n\t\"\"\"\n\tGenerates a set of sliding windows for the specified dataset.\n\t\"\"\"\n\n\t# Determine the dimensions of the input data\n\twidth = data.shape[dimOrder.index('w')]\n\theight = data.shape[dimOrder.index('h')]\n\n\t# Generate the windows\n\treturn generateForSize(width, height, dimOrder, maxWindowSize, overlapPercent, transforms)\n"
] |
from .SlidingWindow import generate
from .Batching import batchWindows
import numpy as np
def mergeWindows(data, dimOrder, maxWindowSize, overlapPercent, batchSize, transform, progressCallback = None):
"""
Generates sliding windows for the specified dataset and applies the specified
transformation function to each window. Where multiple overlapping windows
include an element of the input dataset, the overlap is resolved by computing
the mean transform result value for that element.
Irrespective of the order of the dimensions of the input dataset, the
transformation function should return a NumPy array with dimensions
[batch, height, width, resultChannels].
If a progress callback is supplied, it will be called immediately before
applying the transformation function to each batch of windows. The callback
should accept the current batch index and number of batches as arguments.
"""
# Determine the dimensions of the input data
sourceWidth = data.shape[dimOrder.index('w')]
sourceHeight = data.shape[dimOrder.index('h')]
# Generate the sliding windows and group them into batches
windows = generate(data, dimOrder, maxWindowSize, overlapPercent)
batches = batchWindows(windows, batchSize)
# Apply the transform to the first batch of windows and determine the result dimensionality
exemplarResult = transform(data, batches[0])
resultDimensions = exemplarResult.shape[ len(exemplarResult.shape) - 1 ]
# Create the matrices to hold the sums and counts for the transform result values
sums = np.zeros((sourceHeight, sourceWidth, resultDimensions), dtype=np.float)
counts = np.zeros((sourceHeight, sourceWidth), dtype=np.uint32)
# Iterate over the batches and apply the transformation function to each batch
for batchNum, batch in enumerate(batches):
# If a progress callback was supplied, call it
if progressCallback != None:
progressCallback(batchNum, len(batches))
# Apply the transformation function to the current batch
batchResult = transform(data, batch)
# Iterate over the windows in the batch and update the sums matrix
for windowNum, window in enumerate(batch):
# Create views into the larger matrices that correspond to the current window
windowIndices = window.indices(False)
sumsView = sums[windowIndices]
countsView = counts[windowIndices]
# Update the result sums for each of the dataset elements in the window
sumsView[:] += batchResult[windowNum]
countsView[:] += 1
# Use the sums and the counts to compute the mean values
for dim in range(0, resultDimensions):
sums[:,:,dim] /= counts
# Return the mean values
return sums
|
adamrehn/slidingwindow
|
slidingwindow/ArrayUtils.py
|
_requiredSize
|
python
|
def _requiredSize(shape, dtype):
return math.floor(np.prod(np.asarray(shape, dtype=np.uint64)) * np.dtype(dtype).itemsize)
|
Determines the number of bytes required to store a NumPy array with
the specified shape and datatype.
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/ArrayUtils.py#L5-L10
| null |
import math, mmap, tempfile
import numpy as np
import psutil
class TempfileBackedArray(np.ndarray):
"""
A NumPy ndarray that uses a memory-mapped temp file as its backing
"""
def __new__(subtype, shape, dtype=float, buffer=None, offset=0, strides=None, order=None, info=None):
# Determine the size in bytes required to hold the array
numBytes = _requiredSize(shape, dtype)
# Create the temporary file, resize it, and map it into memory
tempFile = tempfile.TemporaryFile()
tempFile.truncate(numBytes)
buf = mmap.mmap(tempFile.fileno(), numBytes, access=mmap.ACCESS_WRITE)
# Create the ndarray with the memory map as the underlying buffer
obj = super(TempfileBackedArray, subtype).__new__(subtype, shape, dtype, buf, 0, None, order)
# Attach the file reference to the ndarray object
obj._file = tempFile
return obj
def __array_finalize__(self, obj):
if obj is None: return
self._file = getattr(obj, '_file', None)
def arrayFactory(shape, dtype=float):
"""
Creates a new ndarray of the specified shape and datatype, storing
it in memory if there is sufficient available space or else using
a memory-mapped temporary file to provide the underlying buffer.
"""
# Determine the number of bytes required to store the array
requiredBytes = _requiredSize(shape, dtype)
# Determine if there is sufficient available memory
vmem = psutil.virtual_memory()
if vmem.available > requiredBytes:
return np.ndarray(shape=shape, dtype=dtype)
else:
return TempfileBackedArray(shape=shape, dtype=dtype)
def zerosFactory(shape, dtype=float):
"""
Creates a new NumPy array using `arrayFactory()` and fills it with zeros.
"""
arr = arrayFactory(shape=shape, dtype=dtype)
arr.fill(0)
return arr
def arrayCast(source, dtype):
"""
Casts a NumPy array to the specified datatype, storing the copy
in memory if there is sufficient available space or else using a
memory-mapped temporary file to provide the underlying buffer.
"""
# Determine the number of bytes required to store the array
requiredBytes = _requiredSize(source.shape, dtype)
# Determine if there is sufficient available memory
vmem = psutil.virtual_memory()
if vmem.available > requiredBytes:
return source.astype(dtype, subok=False)
else:
dest = arrayFactory(source.shape, dtype)
np.copyto(dest, source, casting='unsafe')
return dest
def determineMaxWindowSize(dtype, limit=None):
"""
Determines the largest square window size that can be used, based on
the specified datatype and amount of currently available system memory.
If `limit` is specified, then this value will be returned in the event
that it is smaller than the maximum computed size.
"""
vmem = psutil.virtual_memory()
maxSize = math.floor(math.sqrt(vmem.available / np.dtype(dtype).itemsize))
if limit is None or limit >= maxSize:
return maxSize
else:
return limit
|
adamrehn/slidingwindow
|
slidingwindow/ArrayUtils.py
|
arrayFactory
|
python
|
def arrayFactory(shape, dtype=float):
# Determine the number of bytes required to store the array
requiredBytes = _requiredSize(shape, dtype)
# Determine if there is sufficient available memory
vmem = psutil.virtual_memory()
if vmem.available > requiredBytes:
return np.ndarray(shape=shape, dtype=dtype)
else:
return TempfileBackedArray(shape=shape, dtype=dtype)
|
Creates a new ndarray of the specified shape and datatype, storing
it in memory if there is sufficient available space or else using
a memory-mapped temporary file to provide the underlying buffer.
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/ArrayUtils.py#L40-L55
|
[
"def _requiredSize(shape, dtype):\n\t\"\"\"\n\tDetermines the number of bytes required to store a NumPy array with\n\tthe specified shape and datatype.\n\t\"\"\"\n\treturn math.floor(np.prod(np.asarray(shape, dtype=np.uint64)) * np.dtype(dtype).itemsize)\n"
] |
import math, mmap, tempfile
import numpy as np
import psutil
def _requiredSize(shape, dtype):
"""
Determines the number of bytes required to store a NumPy array with
the specified shape and datatype.
"""
return math.floor(np.prod(np.asarray(shape, dtype=np.uint64)) * np.dtype(dtype).itemsize)
class TempfileBackedArray(np.ndarray):
"""
A NumPy ndarray that uses a memory-mapped temp file as its backing
"""
def __new__(subtype, shape, dtype=float, buffer=None, offset=0, strides=None, order=None, info=None):
# Determine the size in bytes required to hold the array
numBytes = _requiredSize(shape, dtype)
# Create the temporary file, resize it, and map it into memory
tempFile = tempfile.TemporaryFile()
tempFile.truncate(numBytes)
buf = mmap.mmap(tempFile.fileno(), numBytes, access=mmap.ACCESS_WRITE)
# Create the ndarray with the memory map as the underlying buffer
obj = super(TempfileBackedArray, subtype).__new__(subtype, shape, dtype, buf, 0, None, order)
# Attach the file reference to the ndarray object
obj._file = tempFile
return obj
def __array_finalize__(self, obj):
if obj is None: return
self._file = getattr(obj, '_file', None)
def arrayFactory(shape, dtype=float):
"""
Creates a new ndarray of the specified shape and datatype, storing
it in memory if there is sufficient available space or else using
a memory-mapped temporary file to provide the underlying buffer.
"""
# Determine the number of bytes required to store the array
requiredBytes = _requiredSize(shape, dtype)
# Determine if there is sufficient available memory
vmem = psutil.virtual_memory()
if vmem.available > requiredBytes:
return np.ndarray(shape=shape, dtype=dtype)
else:
return TempfileBackedArray(shape=shape, dtype=dtype)
def zerosFactory(shape, dtype=float):
"""
Creates a new NumPy array using `arrayFactory()` and fills it with zeros.
"""
arr = arrayFactory(shape=shape, dtype=dtype)
arr.fill(0)
return arr
def arrayCast(source, dtype):
"""
Casts a NumPy array to the specified datatype, storing the copy
in memory if there is sufficient available space or else using a
memory-mapped temporary file to provide the underlying buffer.
"""
# Determine the number of bytes required to store the array
requiredBytes = _requiredSize(source.shape, dtype)
# Determine if there is sufficient available memory
vmem = psutil.virtual_memory()
if vmem.available > requiredBytes:
return source.astype(dtype, subok=False)
else:
dest = arrayFactory(source.shape, dtype)
np.copyto(dest, source, casting='unsafe')
return dest
def determineMaxWindowSize(dtype, limit=None):
"""
Determines the largest square window size that can be used, based on
the specified datatype and amount of currently available system memory.
If `limit` is specified, then this value will be returned in the event
that it is smaller than the maximum computed size.
"""
vmem = psutil.virtual_memory()
maxSize = math.floor(math.sqrt(vmem.available / np.dtype(dtype).itemsize))
if limit is None or limit >= maxSize:
return maxSize
else:
return limit
|
adamrehn/slidingwindow
|
slidingwindow/ArrayUtils.py
|
zerosFactory
|
python
|
def zerosFactory(shape, dtype=float):
arr = arrayFactory(shape=shape, dtype=dtype)
arr.fill(0)
return arr
|
Creates a new NumPy array using `arrayFactory()` and fills it with zeros.
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/ArrayUtils.py#L58-L64
|
[
"def arrayFactory(shape, dtype=float):\n\t\"\"\"\n\tCreates a new ndarray of the specified shape and datatype, storing\n\tit in memory if there is sufficient available space or else using\n\ta memory-mapped temporary file to provide the underlying buffer.\n\t\"\"\"\n\n\t# Determine the number of bytes required to store the array\n\trequiredBytes = _requiredSize(shape, dtype)\n\n\t# Determine if there is sufficient available memory\n\tvmem = psutil.virtual_memory()\n\tif vmem.available > requiredBytes:\n\t\treturn np.ndarray(shape=shape, dtype=dtype)\n\telse:\n\t\treturn TempfileBackedArray(shape=shape, dtype=dtype)\n"
] |
import math, mmap, tempfile
import numpy as np
import psutil
def _requiredSize(shape, dtype):
"""
Determines the number of bytes required to store a NumPy array with
the specified shape and datatype.
"""
return math.floor(np.prod(np.asarray(shape, dtype=np.uint64)) * np.dtype(dtype).itemsize)
class TempfileBackedArray(np.ndarray):
"""
A NumPy ndarray that uses a memory-mapped temp file as its backing
"""
def __new__(subtype, shape, dtype=float, buffer=None, offset=0, strides=None, order=None, info=None):
# Determine the size in bytes required to hold the array
numBytes = _requiredSize(shape, dtype)
# Create the temporary file, resize it, and map it into memory
tempFile = tempfile.TemporaryFile()
tempFile.truncate(numBytes)
buf = mmap.mmap(tempFile.fileno(), numBytes, access=mmap.ACCESS_WRITE)
# Create the ndarray with the memory map as the underlying buffer
obj = super(TempfileBackedArray, subtype).__new__(subtype, shape, dtype, buf, 0, None, order)
# Attach the file reference to the ndarray object
obj._file = tempFile
return obj
def __array_finalize__(self, obj):
if obj is None: return
self._file = getattr(obj, '_file', None)
def arrayFactory(shape, dtype=float):
"""
Creates a new ndarray of the specified shape and datatype, storing
it in memory if there is sufficient available space or else using
a memory-mapped temporary file to provide the underlying buffer.
"""
# Determine the number of bytes required to store the array
requiredBytes = _requiredSize(shape, dtype)
# Determine if there is sufficient available memory
vmem = psutil.virtual_memory()
if vmem.available > requiredBytes:
return np.ndarray(shape=shape, dtype=dtype)
else:
return TempfileBackedArray(shape=shape, dtype=dtype)
def arrayCast(source, dtype):
"""
Casts a NumPy array to the specified datatype, storing the copy
in memory if there is sufficient available space or else using a
memory-mapped temporary file to provide the underlying buffer.
"""
# Determine the number of bytes required to store the array
requiredBytes = _requiredSize(source.shape, dtype)
# Determine if there is sufficient available memory
vmem = psutil.virtual_memory()
if vmem.available > requiredBytes:
return source.astype(dtype, subok=False)
else:
dest = arrayFactory(source.shape, dtype)
np.copyto(dest, source, casting='unsafe')
return dest
def determineMaxWindowSize(dtype, limit=None):
"""
Determines the largest square window size that can be used, based on
the specified datatype and amount of currently available system memory.
If `limit` is specified, then this value will be returned in the event
that it is smaller than the maximum computed size.
"""
vmem = psutil.virtual_memory()
maxSize = math.floor(math.sqrt(vmem.available / np.dtype(dtype).itemsize))
if limit is None or limit >= maxSize:
return maxSize
else:
return limit
|
adamrehn/slidingwindow
|
slidingwindow/ArrayUtils.py
|
arrayCast
|
python
|
def arrayCast(source, dtype):
# Determine the number of bytes required to store the array
requiredBytes = _requiredSize(source.shape, dtype)
# Determine if there is sufficient available memory
vmem = psutil.virtual_memory()
if vmem.available > requiredBytes:
return source.astype(dtype, subok=False)
else:
dest = arrayFactory(source.shape, dtype)
np.copyto(dest, source, casting='unsafe')
return dest
|
Casts a NumPy array to the specified datatype, storing the copy
in memory if there is sufficient available space or else using a
memory-mapped temporary file to provide the underlying buffer.
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/ArrayUtils.py#L67-L84
|
[
"def _requiredSize(shape, dtype):\n\t\"\"\"\n\tDetermines the number of bytes required to store a NumPy array with\n\tthe specified shape and datatype.\n\t\"\"\"\n\treturn math.floor(np.prod(np.asarray(shape, dtype=np.uint64)) * np.dtype(dtype).itemsize)\n",
"def arrayFactory(shape, dtype=float):\n\t\"\"\"\n\tCreates a new ndarray of the specified shape and datatype, storing\n\tit in memory if there is sufficient available space or else using\n\ta memory-mapped temporary file to provide the underlying buffer.\n\t\"\"\"\n\n\t# Determine the number of bytes required to store the array\n\trequiredBytes = _requiredSize(shape, dtype)\n\n\t# Determine if there is sufficient available memory\n\tvmem = psutil.virtual_memory()\n\tif vmem.available > requiredBytes:\n\t\treturn np.ndarray(shape=shape, dtype=dtype)\n\telse:\n\t\treturn TempfileBackedArray(shape=shape, dtype=dtype)\n"
] |
import math, mmap, tempfile
import numpy as np
import psutil
def _requiredSize(shape, dtype):
"""
Determines the number of bytes required to store a NumPy array with
the specified shape and datatype.
"""
return math.floor(np.prod(np.asarray(shape, dtype=np.uint64)) * np.dtype(dtype).itemsize)
class TempfileBackedArray(np.ndarray):
"""
A NumPy ndarray that uses a memory-mapped temp file as its backing
"""
def __new__(subtype, shape, dtype=float, buffer=None, offset=0, strides=None, order=None, info=None):
# Determine the size in bytes required to hold the array
numBytes = _requiredSize(shape, dtype)
# Create the temporary file, resize it, and map it into memory
tempFile = tempfile.TemporaryFile()
tempFile.truncate(numBytes)
buf = mmap.mmap(tempFile.fileno(), numBytes, access=mmap.ACCESS_WRITE)
# Create the ndarray with the memory map as the underlying buffer
obj = super(TempfileBackedArray, subtype).__new__(subtype, shape, dtype, buf, 0, None, order)
# Attach the file reference to the ndarray object
obj._file = tempFile
return obj
def __array_finalize__(self, obj):
if obj is None: return
self._file = getattr(obj, '_file', None)
def arrayFactory(shape, dtype=float):
"""
Creates a new ndarray of the specified shape and datatype, storing
it in memory if there is sufficient available space or else using
a memory-mapped temporary file to provide the underlying buffer.
"""
# Determine the number of bytes required to store the array
requiredBytes = _requiredSize(shape, dtype)
# Determine if there is sufficient available memory
vmem = psutil.virtual_memory()
if vmem.available > requiredBytes:
return np.ndarray(shape=shape, dtype=dtype)
else:
return TempfileBackedArray(shape=shape, dtype=dtype)
def zerosFactory(shape, dtype=float):
"""
Creates a new NumPy array using `arrayFactory()` and fills it with zeros.
"""
arr = arrayFactory(shape=shape, dtype=dtype)
arr.fill(0)
return arr
def arrayCast(source, dtype):
"""
Casts a NumPy array to the specified datatype, storing the copy
in memory if there is sufficient available space or else using a
memory-mapped temporary file to provide the underlying buffer.
"""
# Determine the number of bytes required to store the array
requiredBytes = _requiredSize(source.shape, dtype)
# Determine if there is sufficient available memory
vmem = psutil.virtual_memory()
if vmem.available > requiredBytes:
return source.astype(dtype, subok=False)
else:
dest = arrayFactory(source.shape, dtype)
np.copyto(dest, source, casting='unsafe')
return dest
def determineMaxWindowSize(dtype, limit=None):
"""
Determines the largest square window size that can be used, based on
the specified datatype and amount of currently available system memory.
If `limit` is specified, then this value will be returned in the event
that it is smaller than the maximum computed size.
"""
vmem = psutil.virtual_memory()
maxSize = math.floor(math.sqrt(vmem.available / np.dtype(dtype).itemsize))
if limit is None or limit >= maxSize:
return maxSize
else:
return limit
|
adamrehn/slidingwindow
|
slidingwindow/ArrayUtils.py
|
determineMaxWindowSize
|
python
|
def determineMaxWindowSize(dtype, limit=None):
vmem = psutil.virtual_memory()
maxSize = math.floor(math.sqrt(vmem.available / np.dtype(dtype).itemsize))
if limit is None or limit >= maxSize:
return maxSize
else:
return limit
|
Determines the largest square window size that can be used, based on
the specified datatype and amount of currently available system memory.
If `limit` is specified, then this value will be returned in the event
that it is smaller than the maximum computed size.
|
train
|
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/ArrayUtils.py#L87-L100
| null |
import math, mmap, tempfile
import numpy as np
import psutil
def _requiredSize(shape, dtype):
"""
Determines the number of bytes required to store a NumPy array with
the specified shape and datatype.
"""
return math.floor(np.prod(np.asarray(shape, dtype=np.uint64)) * np.dtype(dtype).itemsize)
class TempfileBackedArray(np.ndarray):
"""
A NumPy ndarray that uses a memory-mapped temp file as its backing
"""
def __new__(subtype, shape, dtype=float, buffer=None, offset=0, strides=None, order=None, info=None):
# Determine the size in bytes required to hold the array
numBytes = _requiredSize(shape, dtype)
# Create the temporary file, resize it, and map it into memory
tempFile = tempfile.TemporaryFile()
tempFile.truncate(numBytes)
buf = mmap.mmap(tempFile.fileno(), numBytes, access=mmap.ACCESS_WRITE)
# Create the ndarray with the memory map as the underlying buffer
obj = super(TempfileBackedArray, subtype).__new__(subtype, shape, dtype, buf, 0, None, order)
# Attach the file reference to the ndarray object
obj._file = tempFile
return obj
def __array_finalize__(self, obj):
if obj is None: return
self._file = getattr(obj, '_file', None)
def arrayFactory(shape, dtype=float):
"""
Creates a new ndarray of the specified shape and datatype, storing
it in memory if there is sufficient available space or else using
a memory-mapped temporary file to provide the underlying buffer.
"""
# Determine the number of bytes required to store the array
requiredBytes = _requiredSize(shape, dtype)
# Determine if there is sufficient available memory
vmem = psutil.virtual_memory()
if vmem.available > requiredBytes:
return np.ndarray(shape=shape, dtype=dtype)
else:
return TempfileBackedArray(shape=shape, dtype=dtype)
def zerosFactory(shape, dtype=float):
"""
Creates a new NumPy array using `arrayFactory()` and fills it with zeros.
"""
arr = arrayFactory(shape=shape, dtype=dtype)
arr.fill(0)
return arr
def arrayCast(source, dtype):
"""
Casts a NumPy array to the specified datatype, storing the copy
in memory if there is sufficient available space or else using a
memory-mapped temporary file to provide the underlying buffer.
"""
# Determine the number of bytes required to store the array
requiredBytes = _requiredSize(source.shape, dtype)
# Determine if there is sufficient available memory
vmem = psutil.virtual_memory()
if vmem.available > requiredBytes:
return source.astype(dtype, subok=False)
else:
dest = arrayFactory(source.shape, dtype)
np.copyto(dest, source, casting='unsafe')
return dest
def determineMaxWindowSize(dtype, limit=None):
"""
Determines the largest square window size that can be used, based on
the specified datatype and amount of currently available system memory.
If `limit` is specified, then this value will be returned in the event
that it is smaller than the maximum computed size.
"""
vmem = psutil.virtual_memory()
maxSize = math.floor(math.sqrt(vmem.available / np.dtype(dtype).itemsize))
if limit is None or limit >= maxSize:
return maxSize
else:
return limit
|
PyCQA/pylint-django
|
pylint_django/checkers/__init__.py
|
register_checkers
|
python
|
def register_checkers(linter):
linter.register_checker(ModelChecker(linter))
linter.register_checker(DjangoInstalledChecker(linter))
linter.register_checker(JsonResponseChecker(linter))
linter.register_checker(FormChecker(linter))
|
Register checkers.
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/checkers/__init__.py#L8-L13
| null |
"""Checkers."""
from pylint_django.checkers.django_installed import DjangoInstalledChecker
from pylint_django.checkers.models import ModelChecker
from pylint_django.checkers.json_response import JsonResponseChecker
from pylint_django.checkers.forms import FormChecker
|
PyCQA/pylint-django
|
pylint_django/checkers/db_performance.py
|
register
|
python
|
def register(linter):
linter.register_checker(NewDbFieldWithDefaultChecker(linter))
if not compat.LOAD_CONFIGURATION_SUPPORTED:
load_configuration(linter)
|
Required method to auto register this checker.
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/checkers/db_performance.py#L125-L129
|
[
"def load_configuration(linter):\n # don't blacklist migrations for this checker\n new_black_list = list(linter.config.black_list)\n if 'migrations' in new_black_list:\n new_black_list.remove('migrations')\n linter.config.black_list = new_black_list\n"
] |
# Copyright (c) 2018 Alexander Todorov <atodorov@MrSenko.com>
# Licensed under the GPL 2.0: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint-django/blob/master/LICENSE
"""
Various DB performance suggestions. Disabled by default! Enable with
pylint --load-plugins=pylint_django.checkers.db_performance
"""
import astroid
from pylint import interfaces
from pylint import checkers
from pylint.checkers import utils
from pylint_django.__pkginfo__ import BASE_ID
from pylint_django import compat
def _is_addfield_with_default(call):
if not isinstance(call.func, astroid.Attribute):
return False
if not call.func.attrname == 'AddField':
return False
for keyword in call.keywords:
# looking for AddField(..., field=XXX(..., default=Y, ...), ...)
if keyword.arg == 'field' and isinstance(keyword.value, astroid.Call):
# loop over XXX's keywords
# NOTE: not checking if XXX is an actual field type because there could
# be many types we're not aware of. Also the migration will probably break
# if XXX doesn't instantiate a field object!
for field_keyword in keyword.value.keywords:
if field_keyword.arg == 'default':
return True
return False
def _is_migrations_module(node):
if not isinstance(node, astroid.Module):
return False
return 'migrations' in node.path[0] and not node.path[0].endswith('__init__.py')
class NewDbFieldWithDefaultChecker(checkers.BaseChecker):
    """
    Looks for migrations which add new model fields and these fields have a
    default value. According to Django docs this may have performance penalties
    especially on large tables:

    https://docs.djangoproject.com/en/2.0/topics/migrations/#postgresql

    The preferred way is to add a new DB column with null=True because it will
    be created instantly and then possibly populate the table with the
    desired default values.
    """
    __implements__ = (interfaces.IAstroidChecker,)

    # configuration section name
    name = 'new-db-field-with-default'
    msgs = {'W%s98' % BASE_ID: ("%s AddField with default value",
                                'new-db-field-with-default',
                                'Used when Pylint detects migrations adding new '
                                'fields with a default value.')}

    # NOTE: class-level mutable state; collected across the whole run and
    # only reported in close() so that we can keep the latest migration of
    # each application
    _migration_modules = []
    _possible_offences = {}

    def visit_module(self, node):
        # remember every module that lives in a migrations/ package
        if _is_migrations_module(node):
            self._migration_modules.append(node)

    def visit_call(self, node):
        # record candidate AddField(..., default=...) calls, keyed by module
        try:
            module = node.frame().parent
        except: # noqa: E722, pylint: disable=bare-except
            # frame()/parent can fail on odd nodes; just skip them
            return
        if not _is_migrations_module(module):
            return
        if _is_addfield_with_default(node):
            if module not in self._possible_offences:
                self._possible_offences[module] = []
            if node not in self._possible_offences[module]:
                self._possible_offences[module].append(node)

    @utils.check_messages('new-db-field-with-default')
    def close(self):
        # emit messages once all modules have been visited
        def _path(node):
            return node.path
        # sort all migrations by name in reverse order b/c
        # we need only the latest ones
        self._migration_modules.sort(key=_path, reverse=True)
        # filter out the last migration modules under each distinct
        # migrations directory, iow leave only the latest migrations
        # for each application
        last_name_space = ''
        latest_migrations = []
        for module in self._migration_modules:
            # the path prefix before "migrations" identifies the application
            name_space = module.path[0].split('migrations')[0]
            if name_space != last_name_space:
                last_name_space = name_space
                latest_migrations.append(module)
        for module, nodes in self._possible_offences.items():
            if module in latest_migrations:
                for node in nodes:
                    self.add_message('new-db-field-with-default', args=module.name, node=node)
def load_configuration(linter):
    """Remove 'migrations' from the linter's black list so that this
    checker actually gets to see migration modules."""
    updated_black_list = list(linter.config.black_list)
    try:
        updated_black_list.remove('migrations')
    except ValueError:
        # 'migrations' was not black-listed to begin with
        pass
    linter.config.black_list = updated_black_list
|
PyCQA/pylint-django
|
pylint_django/checkers/forms.py
|
FormChecker.visit_classdef
|
python
|
def visit_classdef(self, node):
    """Class visitor: warn when a ModelForm's Meta uses ``exclude``."""
    if not node_is_subclass(node, 'django.forms.models.ModelForm', '.ModelForm'):
        # we only care about forms
        return

    meta = _get_child_meta(node)

    if not meta:
        return

    for child in meta.get_children():
        if not isinstance(child, Assign):
            continue
        # flag `exclude = [...]` — an explicit `fields` list is safer because
        # newly added model fields are not silently exposed
        if child.targets[0].name == 'exclude':
            self.add_message('W%s04' % BASE_ID, node=child)
            break
|
Class visitor.
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/checkers/forms.py#L32-L49
|
[
"def node_is_subclass(cls, *subclass_names):\n \"\"\"Checks if cls node has parent with subclass_name.\"\"\"\n if not isinstance(cls, (ClassDef, Instance)):\n return False\n\n if cls.bases == Uninferable:\n return False\n for base_cls in cls.bases:\n try:\n for inf in base_cls.inferred():\n if inf.qname() in subclass_names:\n return True\n if inf != cls and node_is_subclass(inf, *subclass_names):\n # check up the hierarchy in case we are a subclass of\n # a subclass of a subclass ...\n return True\n except InferenceError:\n continue\n\n return False\n",
"def _get_child_meta(node):\n for child in node.get_children():\n if isinstance(child, ClassDef) and child.name == 'Meta':\n return child\n return None\n"
] |
class FormChecker(BaseChecker):
"""Django model checker."""
__implements__ = IAstroidChecker
name = 'django-form-checker'
msgs = {
'W%d04' % BASE_ID: ("Use explicit fields instead of exclude in ModelForm",
'modelform-uses-exclude',
"Prevents accidentally allowing users to set fields, "
"especially when adding new fields to a Model")
}
@check_messages('modelform-uses-exclude')
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
ignore_import_warnings_for_related_fields
|
python
|
def ignore_import_warnings_for_related_fields(orig_method, self, node):
    """
    Replaces the leave_module method on the VariablesChecker class to
    prevent unused-import warnings which are caused by the ForeignKey
    and OneToOneField transformations. By replacing the nodes in the
    AST with their type rather than the django field, imports of the
    form 'from django.db.models import OneToOneField' raise an unused-import
    warning.
    """
    consumer = self._to_consume[0]  # pylint: disable=W0212
    # we can disable this warning ('Access to a protected member _to_consume of a client class')
    # as it's not actually a client class, but rather, this method is being monkey patched
    # onto the class and so the access is valid
    new_things = {}

    # dict iteration API differs between Python 2 and 3
    iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems
    for name, stmts in iterat():
        if isinstance(stmts[0], ImportFrom):
            # drop ForeignKey/OneToOneField imports from the "to consume"
            # set so they are never reported as unused
            if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
                continue
        new_things[name] = stmts

    consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type)  # pylint: disable=W0212
    self._to_consume = [consumer]  # pylint: disable=W0212

    return orig_method(self, node)
|
Replaces the leave_module method on the VariablesChecker class to
prevent unused-import warnings which are caused by the ForeignKey
and OneToOneField transformations. By replacing the nodes in the
AST with their type rather than the django field, imports of the
form 'from django.db.models import OneToOneField' raise an unused-import
warning
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L291-L317
| null |
"""Augmentations."""
# pylint: disable=invalid-name
import itertools
from astroid import InferenceError
from astroid.objects import Super
from astroid.nodes import ClassDef, ImportFrom, Attribute
from astroid.scoped_nodes import ClassDef as ScopedClass, Module
from pylint.checkers.base import DocStringChecker, NameChecker
from pylint.checkers.design_analysis import MisdesignChecker
from pylint.checkers.classes import ClassChecker
from pylint.checkers.newstyle import NewStyleConflictChecker
from pylint.checkers.variables import VariablesChecker
from pylint.checkers.typecheck import TypeChecker
from pylint.checkers.variables import ScopeConsumer
from pylint_plugin_utils import augment_visit, suppress_message
from django import VERSION as django_version
from django.views.generic.base import View, RedirectView, ContextMixin
from django.views.generic.dates import DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin
from django.views.generic.detail import SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin
from django.views.generic.edit import DeletionMixin, FormMixin, ModelFormMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
from django.utils import termcolors
from pylint_django.utils import node_is_subclass, PY3
# Note: it would have been nice to import the Manager object from Django and
# get its attributes that way - and this used to be the method - but unfortunately
# there's no guarantee that Django is properly configured at that stage, and importing
# anything from the django.db package causes an ImproperlyConfigured exception.
# Therefore we'll fall back on a hard-coded list of attributes which won't be as accurate,
# but this is not 100% accurate anyway.
MANAGER_ATTRS = {
'none',
'all',
'count',
'dates',
'distinct',
'extra',
'get',
'get_or_create',
'get_queryset',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'latest',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
}
QS_ATTRS = {
'filter',
'exclude',
'annotate',
'order_by',
'reverse',
'distinct',
'values',
'values_list',
'dates',
'datetimes',
'none',
'all',
'select_related',
'prefetch_related',
'extra',
'defer',
'only',
'using',
'select_for_update',
'raw',
'get',
'create',
'get_or_create',
'update_or_create',
'bulk_create',
'count',
'in_bulk',
'iterator',
'latest',
'earliest',
'first',
'last',
'aggregate',
'exists',
'update',
'delete',
'as_manager',
'expression',
'output_field',
}
MODELADMIN_ATTRS = {
# options
'actions',
'actions_on_top',
'actions_on_bottom',
'actions_selection_counter',
'date_hierarchy',
'empty_value_display',
'exclude',
'fields',
'fieldsets',
'filter_horizontal',
'filter_vertical',
'form',
'formfield_overrides',
'inlines',
'list_display',
'list_display_links',
'list_editable',
'list_filter',
'list_max_show_all',
'list_per_page',
'list_select_related',
'ordering',
'paginator',
'prepopulated_fields',
'preserve_filters',
'radio_fields',
'raw_id_fields',
'readonly_fields',
'save_as',
'save_on_top',
'search_fields',
'show_full_result_count',
'view_on_site',
# template options
'add_form_template',
'change_form_template',
'change_list_template',
'delete_confirmation_template',
'delete_selected_confirmation_template',
'object_history_template',
}
MODEL_ATTRS = {
'id',
'DoesNotExist',
'MultipleObjectsReturned',
'_base_manager',
'_default_manager',
'_meta',
'delete',
'get_next_by_date',
'get_previous_by_date',
'objects',
'save',
}
FIELD_ATTRS = {
'null',
'blank',
'choices',
'db_column',
'db_index',
'db_tablespace',
'default',
'editable',
'error_messages',
'help_text',
'primary_key',
'unique',
'unique_for_date',
'unique_for_month',
'unique_for_year',
'verbose_name',
'validators',
}
CHAR_FIELD_ATTRS = {
'max_length',
}
DATE_FIELD_ATTRS = {
'auto_now',
'auto_now_add',
}
DECIMAL_FIELD_ATTRS = {
'max_digits',
'decimal_places',
}
FILE_FIELD_ATTRS = {
'upload_to',
'storage',
}
IMAGE_FIELD_ATTRS = {
'height_field',
'width_field',
}
IP_FIELD_ATTRS = {
'protocol',
'unpack_ipv4',
}
SLUG_FIELD_ATTRS = {
'allow_unicode',
}
FOREIGNKEY_FIELD_ATTRS = {
'limit_choices_to',
'related_name',
'related_query_name',
'to_field',
'db_constraint',
'swappable',
}
MANYTOMANY_FIELD_ATTRS = {
'add',
'clear',
'related_name',
'related_query_name',
'limit_choices_to',
'symmetrical',
'through',
'through_fields',
'db_table',
'db_constraint',
'swappable',
}
ONETOONE_FIELD_ATTRS = {
'parent_link',
}
STYLE_ATTRS = set(itertools.chain.from_iterable(termcolors.PALETTES.values()))
VIEW_ATTRS = {
(
(
'{}.{}'.format(cls.__module__, cls.__name__),
'.{}'.format(cls.__name__)
),
tuple(cls.__dict__.keys())
) for cls in (
View, RedirectView, ContextMixin,
DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin,
SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin,
DeletionMixin, FormMixin, ModelFormMixin,
MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)
}
FORM_ATTRS = {
'declared_fields',
}
def foreign_key_sets(chain, node):
    """
    When a Django model has a ForeignKey to another model, the target
    of the foreign key gets a '<modelname>_set' attribute for accessing
    a queryset of the model owning the foreign key - eg:

    class ModelA(models.Model):
        pass

    class ModelB(models.Model):
        a = models.ForeignKey(ModelA)

    Now, ModelA instances will have a modelb_set attribute.

    It's also possible to explicitly name the relationship using the related_name argument
    to the ForeignKey constructor. As it's impossible to know this without inspecting all
    models before processing, we'll instead do a "best guess" approach and see if the attribute
    being accessed goes on to be used as a queryset. This is via 'duck typing': if the method
    called on the attribute being accessed is something we might find in a queryset, we'll
    warn.
    """
    quack = False

    if node.attrname in MANAGER_ATTRS or node.attrname.endswith('_set'):
        # if this is a X_set method, that's a pretty strong signal that this is the default
        # Django name, rather than one set by related_name
        quack = True
    else:
        # not an obvious queryset name: duck-type by checking whether the
        # next attribute in the chain is a known queryset/manager method
        if isinstance(node.parent, Attribute):
            func_name = getattr(node.parent, 'attrname', None)
            if func_name in MANAGER_ATTRS:
                quack = True

    if quack:
        children = list(node.get_children())
        for child in children:
            try:
                inferred_cls = child.inferred()
            except InferenceError:
                pass
            else:
                for cls in inferred_cls:
                    if (node_is_subclass(cls,
                                         'django.db.models.manager.Manager',
                                         'django.db.models.base.Model',
                                         '.Model',
                                         'django.db.models.fields.related.ForeignObject')):
                        # This means that we are looking at a subclass of models.Model
                        # and something is trying to access a <something>_set attribute.
                        # Since this could exist, we will return so as not to raise an
                        # error.
                        return
    chain()
def foreign_key_ids(chain, node):
    """Skip the chained check for '<field>_id' style foreign-key accessors."""
    if not node.attrname.endswith('_id'):
        chain()
def is_model_admin_subclass(node):
    """Checks that node is derivative of ModelAdmin class."""
    if not node.name.endswith('Admin'):
        return False
    if isinstance(node.parent, ClassDef):
        # nested classes are not admin registrations
        return False

    return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin')
def is_model_media_subclass(node):
    """Checks that node is derivative of Media class."""
    if node.name != 'Media':
        return False
    if not isinstance(node.parent, ClassDef):
        return False

    eligible_owners = (
        'django.contrib.admin.options.ModelAdmin',
        'django.forms.widgets.Media',
        'django.db.models.base.Model',
        '.Model',  # for the transformed version used in this plugin
        'django.forms.forms.Form',
        '.Form',
        'django.forms.widgets.Widget',
        '.Widget',
        'django.forms.models.ModelForm',
        '.ModelForm',
    )
    return node_is_subclass(node.parent, *eligible_owners)
def is_model_meta_subclass(node):
    """Checks that node is derivative of Meta class."""
    if node.name != 'Meta':
        return False
    if not isinstance(node.parent, ClassDef):
        return False

    meta_owners = (
        '.Model',  # for the transformed version used here
        'django.db.models.base.Model',
        '.Form',
        'django.forms.forms.Form',
        '.ModelForm',
        'django.forms.models.ModelForm',
        'rest_framework.serializers.BaseSerializer',
        'rest_framework.generics.GenericAPIView',
        'rest_framework.viewsets.ReadOnlyModelViewSet',
        'rest_framework.viewsets.ModelViewSet',
        'django_filters.filterset.FilterSet',
        'factory.django.DjangoModelFactory',
    )
    return node_is_subclass(node.parent, *meta_owners)
def is_model_factory(node):
    """Checks that node is derivative of DjangoModelFactory or SubFactory class."""
    try:
        parent_classes = node.expr.inferred()
    except: # noqa: E722, pylint: disable=bare-except
        # inference can fail in many ways; treat any failure as "not a factory"
        return False

    parents = ('factory.declarations.LazyFunction',
               'factory.declarations.SubFactory',
               'factory.django.DjangoModelFactory')

    for parent_class in parent_classes:
        try:
            if parent_class.qname() in parents:
                return True
            if node_is_subclass(parent_class, *parents):
                return True
        except AttributeError:
            # inferred object without qname() (e.g. Uninferable)
            continue

    return False
def is_factory_post_generation_method(node):
    """Return True when *node* is decorated with factory.post_generation."""
    if not node.decorators:
        return False

    for decorator in node.decorators.get_children():
        try:
            candidates = decorator.inferred()
        except InferenceError:
            continue
        if any(candidate.qname() == 'factory.helpers.post_generation'
               for candidate in candidates):
            return True

    return False
def is_model_mpttmeta_subclass(node):
    """Checks that node is derivative of MPTTMeta class."""
    if node.name != 'MPTTMeta':
        return False
    if not isinstance(node.parent, ClassDef):
        return False

    mptt_owners = (
        'django.db.models.base.Model',
        '.Model',  # for the transformed version used in this plugin
        'django.forms.forms.Form',
        '.Form',
        'django.forms.models.ModelForm',
        '.ModelForm',
    )
    return node_is_subclass(node.parent, *mptt_owners)
def _attribute_is_magic(node, attrs, parents):
    """Checks that node is an attribute used inside one of allowed parents"""
    if node.attrname not in attrs:
        return False
    if not node.last_child():
        return False
    try:
        for cls in node.last_child().inferred():
            if isinstance(cls, Super):
                # unwrap super() proxies to the real class before checking
                cls = cls._self_class  # pylint: disable=protected-access
            if node_is_subclass(cls, *parents) or cls.qname() in parents:
                return True
    except InferenceError:
        # unable to infer the owner; err on the side of not suppressing
        pass
    return False
def is_style_attribute(node):
    """Checks that node is an attribute of a termcolors Style object."""
    style_parents = ('django.core.management.color.Style', )
    return _attribute_is_magic(node, STYLE_ATTRS, style_parents)


def is_manager_attribute(node):
    """Checks that node is attribute of Manager or QuerySet class."""
    manager_parents = (
        'django.db.models.manager.Manager',
        '.Manager',
        'factory.base.BaseFactory.build',
        'django.db.models.query.QuerySet',
        '.QuerySet',
    )
    return _attribute_is_magic(node, MANAGER_ATTRS | QS_ATTRS, manager_parents)


def is_admin_attribute(node):
    """Checks that node is attribute of BaseModelAdmin."""
    admin_parents = (
        'django.contrib.admin.options.BaseModelAdmin',
        '.BaseModelAdmin',
    )
    return _attribute_is_magic(node, MODELADMIN_ATTRS, admin_parents)


def is_model_attribute(node):
    """Checks that node is attribute of Model."""
    model_parents = ('django.db.models.base.Model', '.Model')
    return _attribute_is_magic(node, MODEL_ATTRS, model_parents)


def is_field_attribute(node):
    """Checks that node is attribute of Field."""
    field_parents = ('django.db.models.fields.Field', '.Field')
    return _attribute_is_magic(node, FIELD_ATTRS, field_parents)


def is_charfield_attribute(node):
    """Checks that node is attribute of CharField."""
    charfield_parents = ('django.db.models.fields.CharField', '.CharField')
    return _attribute_is_magic(node, CHAR_FIELD_ATTRS, charfield_parents)


def is_datefield_attribute(node):
    """Checks that node is attribute of DateField."""
    datefield_parents = ('django.db.models.fields.DateField', '.DateField')
    return _attribute_is_magic(node, DATE_FIELD_ATTRS, datefield_parents)


def is_decimalfield_attribute(node):
    """Checks that node is attribute of DecimalField."""
    decimalfield_parents = ('django.db.models.fields.DecimalField', '.DecimalField')
    return _attribute_is_magic(node, DECIMAL_FIELD_ATTRS, decimalfield_parents)


def is_filefield_attribute(node):
    """Checks that node is attribute of FileField."""
    filefield_parents = ('django.db.models.fields.files.FileField', '.FileField')
    return _attribute_is_magic(node, FILE_FIELD_ATTRS, filefield_parents)


def is_imagefield_attribute(node):
    """Checks that node is attribute of ImageField."""
    imagefield_parents = ('django.db.models.fields.files.ImageField', '.ImageField')
    return _attribute_is_magic(node, IMAGE_FIELD_ATTRS, imagefield_parents)


def is_ipfield_attribute(node):
    """Checks that node is attribute of GenericIPAddressField."""
    ipfield_parents = ('django.db.models.fields.GenericIPAddressField', '.GenericIPAddressField')
    return _attribute_is_magic(node, IP_FIELD_ATTRS, ipfield_parents)


def is_slugfield_attribute(node):
    """Checks that node is attribute of SlugField."""
    slugfield_parents = ('django.db.models.fields.SlugField', '.SlugField')
    return _attribute_is_magic(node, SLUG_FIELD_ATTRS, slugfield_parents)


def is_foreignkeyfield_attribute(node):
    """Checks that node is attribute of ForeignKey."""
    fk_parents = ('django.db.models.fields.related.ForeignKey', '.ForeignKey')
    return _attribute_is_magic(node, FOREIGNKEY_FIELD_ATTRS, fk_parents)


def is_manytomanyfield_attribute(node):
    """Checks that node is attribute of ManyToManyField."""
    m2m_parents = ('django.db.models.fields.related.ManyToManyField', '.ManyToManyField')
    return _attribute_is_magic(node, MANYTOMANY_FIELD_ATTRS, m2m_parents)


def is_onetoonefield_attribute(node):
    """Checks that node is attribute of OneToOneField."""
    o2o_parents = ('django.db.models.fields.related.OneToOneField', '.OneToOneField')
    return _attribute_is_magic(node, ONETOONE_FIELD_ATTRS, o2o_parents)


def is_form_attribute(node):
    """Checks that node is attribute of Form."""
    form_parents = ('django.forms.forms.Form', 'django.forms.models.ModelForm')
    return _attribute_is_magic(node, FORM_ATTRS, form_parents)
def is_model_test_case_subclass(node):
    """Checks that node is derivative of TestCase class."""
    # NOTE(review): the `and` here differs from the `or`-style guard used by
    # is_model_admin_subclass — it looks like it was meant to be `or`; confirm
    # upstream before changing, since current (laxer) behaviour is relied on.
    if not node.name.endswith('Test') and not isinstance(node.parent, ClassDef):
        return False

    return node_is_subclass(node, 'django.test.testcases.TestCase')
def generic_is_view_attribute(parents, attrs):
    """Generates is_X_attribute function for given parents and attrs."""
    def _checker(node):
        return _attribute_is_magic(node, attrs, parents)
    return _checker
def is_model_view_subclass_method_shouldnt_be_function(node):
    """Checks that node is get or post method of the View class."""
    if node.name not in ('get', 'post'):
        return False

    # climb up to the enclosing class definition, if any
    enclosing = node.parent
    while enclosing and not isinstance(enclosing, ScopedClass):
        enclosing = enclosing.parent

    view_classes = ('django.views.View',
                    'django.views.generic.View',
                    'django.views.generic.base.View')
    return enclosing is not None and node_is_subclass(enclosing, *view_classes)
def is_model_view_subclass_unused_argument(node):
    """
    Checks that node is get or post method of the View class and it has valid arguments.

    TODO: Bad checkings, need to be more smart.
    """
    return (is_model_view_subclass_method_shouldnt_be_function(node)
            and is_argument_named_request(node))
def is_argument_named_request(node):
    """If an unused-argument is named 'request' ignore that!"""
    return any(arg == 'request' for arg in node.argnames())
def is_model_field_display_method(node):
    """Accept model's fields with get_*_display names."""
    attr = node.attrname
    if not (attr.startswith('get_') and attr.endswith('_display')):
        return False

    owner = node.last_child()
    if owner:
        # TODO: could validate the names of the fields on the model rather than
        # blindly accepting get_*_display
        try:
            for inferred in owner.inferred():
                if node_is_subclass(inferred, 'django.db.models.base.Model', '.Model'):
                    return True
        except InferenceError:
            return False
    return False
def is_model_media_valid_attributes(node):
    """Suppress warnings for valid attributes of Media class."""
    if node.name not in ('js', ):
        return False

    enclosing = node.parent
    while enclosing and not isinstance(enclosing, ScopedClass):
        enclosing = enclosing.parent

    return enclosing is not None and enclosing.name == "Media"
def is_templatetags_module_valid_constant(node):
    """Suppress warnings for valid constants in templatetags module."""
    if node.name not in ('register', ):
        return False

    module = node.parent
    while not isinstance(module, Module):
        module = module.parent

    return "templatetags." in module.name
def is_urls_module_valid_constant(node):
    """Suppress warnings for valid constants in urls module."""
    if node.name not in ('urlpatterns', 'app_name'):
        return False

    module = node.parent
    while not isinstance(module, Module):
        module = module.parent

    return module.name.endswith('urls')
def allow_meta_protected_access(node):
    """Allow Model._meta access on Django >= 1.8 where it is a public API."""
    return django_version >= (1, 8) and node.attrname == '_meta'
def is_class(class_name):
    """Shortcut for node_is_subclass."""
    def _check(node):
        return node_is_subclass(node, class_name)
    return _check
def wrap(orig_method, with_method):
    """Return a wrapper that calls *with_method* with *orig_method* prepended
    to the call's positional arguments (the wrapper itself returns None)."""
    def wrapped(*args, **kwargs):
        with_method(orig_method, *args, **kwargs)
    return wrapped
def is_wsgi_application(node):
    """Return True for the module-level `application` object of a wsgi module."""
    frame = node.frame()
    if node.name != 'application' or not isinstance(frame, Module):
        return False
    return (frame.name == 'wsgi'
            or frame.path[0].endswith('wsgi.py')
            or frame.file.endswith('wsgi.py'))
# Compat helpers
def pylint_newstyle_classdef_compat(linter, warning_name, augment):
    """Suppress *warning_name* on classdef visits when the running pylint
    version still defines NewStyleConflictChecker.visit_classdef."""
    visit = getattr(NewStyleConflictChecker, 'visit_classdef', None)
    if visit is None:
        return
    suppress_message(linter, visit, warning_name, augment)
# augment things
def apply_augmentations(linter):
    """Apply augmentation and suppression rules."""
    # FK reverse accessors (<model>_set) and raw <field>_id attributes
    augment_visit(linter, TypeChecker.visit_attribute, foreign_key_sets)
    augment_visit(linter, TypeChecker.visit_attribute, foreign_key_ids)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_field_display_method)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_style_attribute)
    suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_urls_module_valid_constant)

    # supress errors when accessing magical class attributes
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manager_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_admin_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_field_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_charfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_datefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_decimalfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_filefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_imagefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_ipfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_slugfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_foreignkeyfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manytomanyfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_onetoonefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_form_attribute)
    # class-based view / mixin attributes collected in VIEW_ATTRS
    for parents, attrs in VIEW_ATTRS:
        suppress_message(linter, TypeChecker.visit_attribute, 'no-member', generic_is_view_attribute(parents, attrs))

    # formviews have too many ancestors, there's nothing the user of the library can do about that
    suppress_message(linter, MisdesignChecker.visit_classdef, 'too-many-ancestors',
                     is_class('django.views.generic.edit.FormView'))

    # model forms have no __init__ method anywhere in their bases
    suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_class('django.forms.models.ModelForm'))

    # Meta
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_meta_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_meta_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_meta_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_meta_subclass)
    suppress_message(linter, ClassChecker.visit_attribute, 'protected-access', allow_meta_protected_access)

    # Media
    suppress_message(linter, NameChecker.visit_assignname, 'C0103', is_model_media_valid_attributes)
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_media_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_media_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_media_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_media_subclass)

    # Admin
    # Too many public methods (40+/20)
    # TODO: Count public methods of django.contrib.admin.options.ModelAdmin and increase
    # MisdesignChecker.config.max_public_methods to this value to count only user' methods.
    # nb_public_methods = 0
    # for method in node.methods():
    #     if not method.name.startswith('_'):
    #         nb_public_methods += 1
    suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_admin_subclass)

    # Tests
    suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_test_case_subclass)

    # View
    # Method could be a function (get, post)
    suppress_message(linter, ClassChecker.leave_functiondef, 'no-self-use',
                     is_model_view_subclass_method_shouldnt_be_function)
    # Unused argument 'request' (get, post)
    suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument',
                     is_model_view_subclass_unused_argument)
    suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument', is_argument_named_request)

    # django-mptt
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_mpttmeta_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_mpttmeta_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_model_mpttmeta_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_mpttmeta_subclass)

    # factory_boy's DjangoModelFactory
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_factory)
    suppress_message(linter, ClassChecker.visit_functiondef, 'no-self-argument', is_factory_post_generation_method)

    # ForeignKey and OneToOneField
    # Must update this in a thread safe way to support the parallel option on pylint (-j)
    current_leave_module = VariablesChecker.leave_module
    if current_leave_module.__name__ == 'leave_module':
        # current_leave_module is not wrapped
        # Two threads may hit the next assignment concurrently, but the result is the same
        VariablesChecker.leave_module = wrap(current_leave_module, ignore_import_warnings_for_related_fields)
        # VariablesChecker.leave_module is now wrapped
    # else VariablesChecker.leave_module is already wrapped

    # wsgi.py
    suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_wsgi_application)
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
foreign_key_sets
|
python
|
def foreign_key_sets(chain, node):
    """Suppress false 'no-member' errors for FK reverse accessors.

    ForeignKey targets gain a '<modelname>_set' queryset attribute (or one
    named via related_name). Since related_name cannot be known without
    inspecting all models, duck-type: if the attribute looks like (or is
    chained into) a queryset/manager method on a Model subclass, skip the
    chained check instead of raising an error.
    """
    quack = False
    if node.attrname in MANAGER_ATTRS or node.attrname.endswith('_set'):
        # if this is a X_set method, that's a pretty strong signal that this is the default
        # Django name, rather than one set by related_name
        quack = True
    else:
        # otherwise duck-type: is the next attribute in the chain a known
        # queryset/manager method?
        if isinstance(node.parent, Attribute):
            func_name = getattr(node.parent, 'attrname', None)
            if func_name in MANAGER_ATTRS:
                quack = True
    if quack:
        children = list(node.get_children())
        for child in children:
            try:
                inferred_cls = child.inferred()
            except InferenceError:
                pass
            else:
                for cls in inferred_cls:
                    if (node_is_subclass(cls,
                                         'django.db.models.manager.Manager',
                                         'django.db.models.base.Model',
                                         '.Model',
                                         'django.db.models.fields.related.ForeignObject')):
                        # This means that we are looking at a subclass of models.Model
                        # and something is trying to access a <something>_set attribute.
                        # Since this could exist, we will return so as not to raise an
                        # error.
                        return
    chain()
|
When a Django model has a ForeignKey to another model, the target
of the foreign key gets a '<modelname>_set' attribute for accessing
a queryset of the model owning the foreign key - eg:
class ModelA(models.Model):
pass
class ModelB(models.Model):
a = models.ForeignKey(ModelA)
Now, ModelA instances will have a modelb_set attribute.
It's also possible to explicitly name the relationship using the related_name argument
to the ForeignKey constructor. As it's impossible to know this without inspecting all
models before processing, we'll instead do a "best guess" approach and see if the attribute
being accessed goes on to be used as a queryset. This is via 'duck typing': if the method
called on the attribute being accessed is something we might find in a queryset, we'll
warn.
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L320-L373
|
[
"def node_is_subclass(cls, *subclass_names):\n \"\"\"Checks if cls node has parent with subclass_name.\"\"\"\n if not isinstance(cls, (ClassDef, Instance)):\n return False\n\n if cls.bases == Uninferable:\n return False\n for base_cls in cls.bases:\n try:\n for inf in base_cls.inferred():\n if inf.qname() in subclass_names:\n return True\n if inf != cls and node_is_subclass(inf, *subclass_names):\n # check up the hierarchy in case we are a subclass of\n # a subclass of a subclass ...\n return True\n except InferenceError:\n continue\n\n return False\n"
] |
"""Augmentations."""
# pylint: disable=invalid-name
import itertools
from astroid import InferenceError
from astroid.objects import Super
from astroid.nodes import ClassDef, ImportFrom, Attribute
from astroid.scoped_nodes import ClassDef as ScopedClass, Module
from pylint.checkers.base import DocStringChecker, NameChecker
from pylint.checkers.design_analysis import MisdesignChecker
from pylint.checkers.classes import ClassChecker
from pylint.checkers.newstyle import NewStyleConflictChecker
from pylint.checkers.variables import VariablesChecker
from pylint.checkers.typecheck import TypeChecker
from pylint.checkers.variables import ScopeConsumer
from pylint_plugin_utils import augment_visit, suppress_message
from django import VERSION as django_version
from django.views.generic.base import View, RedirectView, ContextMixin
from django.views.generic.dates import DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin
from django.views.generic.detail import SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin
from django.views.generic.edit import DeletionMixin, FormMixin, ModelFormMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
from django.utils import termcolors
from pylint_django.utils import node_is_subclass, PY3
# Note: it would have been nice to import the Manager object from Django and
# get its attributes that way - and this used to be the method - but unfortunately
# there's no guarantee that Django is properly configured at that stage, and importing
# anything from the django.db package causes an ImproperlyConfigured exception.
# Therefore we'll fall back on a hard-coded list of attributes which won't be as accurate,
# but this is not 100% accurate anyway.
MANAGER_ATTRS = {
'none',
'all',
'count',
'dates',
'distinct',
'extra',
'get',
'get_or_create',
'get_queryset',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'latest',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
}
QS_ATTRS = {
'filter',
'exclude',
'annotate',
'order_by',
'reverse',
'distinct',
'values',
'values_list',
'dates',
'datetimes',
'none',
'all',
'select_related',
'prefetch_related',
'extra',
'defer',
'only',
'using',
'select_for_update',
'raw',
'get',
'create',
'get_or_create',
'update_or_create',
'bulk_create',
'count',
'in_bulk',
'iterator',
'latest',
'earliest',
'first',
'last',
'aggregate',
'exists',
'update',
'delete',
'as_manager',
'expression',
'output_field',
}
MODELADMIN_ATTRS = {
# options
'actions',
'actions_on_top',
'actions_on_bottom',
'actions_selection_counter',
'date_hierarchy',
'empty_value_display',
'exclude',
'fields',
'fieldsets',
'filter_horizontal',
'filter_vertical',
'form',
'formfield_overrides',
'inlines',
'list_display',
'list_display_links',
'list_editable',
'list_filter',
'list_max_show_all',
'list_per_page',
'list_select_related',
'ordering',
'paginator',
'prepopulated_fields',
'preserve_filters',
'radio_fields',
'raw_id_fields',
'readonly_fields',
'save_as',
'save_on_top',
'search_fields',
'show_full_result_count',
'view_on_site',
# template options
'add_form_template',
'change_form_template',
'change_list_template',
'delete_confirmation_template',
'delete_selected_confirmation_template',
'object_history_template',
}
MODEL_ATTRS = {
'id',
'DoesNotExist',
'MultipleObjectsReturned',
'_base_manager',
'_default_manager',
'_meta',
'delete',
'get_next_by_date',
'get_previous_by_date',
'objects',
'save',
}
FIELD_ATTRS = {
'null',
'blank',
'choices',
'db_column',
'db_index',
'db_tablespace',
'default',
'editable',
'error_messages',
'help_text',
'primary_key',
'unique',
'unique_for_date',
'unique_for_month',
'unique_for_year',
'verbose_name',
'validators',
}
CHAR_FIELD_ATTRS = {
'max_length',
}
DATE_FIELD_ATTRS = {
'auto_now',
'auto_now_add',
}
DECIMAL_FIELD_ATTRS = {
'max_digits',
'decimal_places',
}
FILE_FIELD_ATTRS = {
'upload_to',
'storage',
}
IMAGE_FIELD_ATTRS = {
'height_field',
'width_field',
}
IP_FIELD_ATTRS = {
'protocol',
'unpack_ipv4',
}
SLUG_FIELD_ATTRS = {
'allow_unicode',
}
FOREIGNKEY_FIELD_ATTRS = {
'limit_choices_to',
'related_name',
'related_query_name',
'to_field',
'db_constraint',
'swappable',
}
MANYTOMANY_FIELD_ATTRS = {
'add',
'clear',
'related_name',
'related_query_name',
'limit_choices_to',
'symmetrical',
'through',
'through_fields',
'db_table',
'db_constraint',
'swappable',
}
ONETOONE_FIELD_ATTRS = {
'parent_link',
}
STYLE_ATTRS = set(itertools.chain.from_iterable(termcolors.PALETTES.values()))
VIEW_ATTRS = {
(
(
'{}.{}'.format(cls.__module__, cls.__name__),
'.{}'.format(cls.__name__)
),
tuple(cls.__dict__.keys())
) for cls in (
View, RedirectView, ContextMixin,
DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin,
SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin,
DeletionMixin, FormMixin, ModelFormMixin,
MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)
}
FORM_ATTRS = {
'declared_fields',
}
def ignore_import_warnings_for_related_fields(orig_method, self, node):
    """
    Replaces the leave_module method on the VariablesChecker class to
    prevent unused-import warnings which are caused by the ForeignKey
    and OneToOneField transformations. By replacing the nodes in the
    AST with their type rather than the django field, imports of the
    form 'from django.db.models import OneToOneField' raise an unused-import
    warning.

    Intended to be installed via ``wrap()`` so that ``orig_method`` is the
    original ``VariablesChecker.leave_module`` and ``self`` is the checker
    instance.
    """
    consumer = self._to_consume[0]  # pylint: disable=W0212
    # we can disable this warning ('Access to a protected member _to_consume of a client class')
    # as it's not actually a client class, but rather, this method is being monkey patched
    # onto the class and so the access is valid
    new_things = {}
    # consumer.to_consume maps name -> statements; dict iteration API differs on py2/py3
    iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems
    for name, stmts in iterat():
        if isinstance(stmts[0], ImportFrom):
            # drop ForeignKey/OneToOneField imports so they are not reported unused
            if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
                continue
        new_things[name] = stmts
    # rebuild the scope consumer with the filtered name table before delegating
    consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type)  # pylint: disable=W0212
    self._to_consume = [consumer]  # pylint: disable=W0212
    return orig_method(self, node)
def foreign_key_ids(chain, node):
    """Accept implicit ``<fk>_id`` attributes Django adds for foreign keys.

    Only continues down the augmentation chain (i.e. lets the warning
    through) when the attribute name does not end in ``_id``.
    """
    if not node.attrname.endswith('_id'):
        chain()
def is_model_admin_subclass(node):
    """Checks that node is derivative of ModelAdmin class."""
    # cheap name check first; nested classes are never treated as admins
    if not node.name.endswith('Admin'):
        return False
    if isinstance(node.parent, ClassDef):
        return False
    return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin')
def is_model_media_subclass(node):
    """Checks that node is derivative of Media class."""
    # must literally be a nested class named 'Media'
    if node.name != 'Media':
        return False
    if not isinstance(node.parent, ClassDef):
        return False
    media_owners = (
        'django.contrib.admin.options.ModelAdmin',
        'django.forms.widgets.Media',
        'django.db.models.base.Model',
        '.Model',  # for the transformed version used in this plugin
        'django.forms.forms.Form',
        '.Form',
        'django.forms.widgets.Widget',
        '.Widget',
        'django.forms.models.ModelForm',
        '.ModelForm',
    )
    return node_is_subclass(node.parent, *media_owners)
def is_model_meta_subclass(node):
    """Checks that node is derivative of Meta class."""
    # must literally be a nested class named 'Meta'
    if node.name != 'Meta':
        return False
    if not isinstance(node.parent, ClassDef):
        return False
    meta_owners = (
        '.Model',  # for the transformed version used here
        'django.db.models.base.Model',
        '.Form',
        'django.forms.forms.Form',
        '.ModelForm',
        'django.forms.models.ModelForm',
        'rest_framework.serializers.BaseSerializer',
        'rest_framework.generics.GenericAPIView',
        'rest_framework.viewsets.ReadOnlyModelViewSet',
        'rest_framework.viewsets.ModelViewSet',
        'django_filters.filterset.FilterSet',
        'factory.django.DjangoModelFactory',
    )
    return node_is_subclass(node.parent, *meta_owners)
def is_model_factory(node):
    """Checks that node is derivative of DjangoModelFactory or SubFactory class."""
    try:
        parent_classes = node.expr.inferred()
    except:  # noqa: E722, pylint: disable=bare-except
        # astroid inference can raise a variety of exception types here;
        # the bare except is deliberate - any failure means "not a factory".
        return False
    parents = ('factory.declarations.LazyFunction',
               'factory.declarations.SubFactory',
               'factory.django.DjangoModelFactory')
    for parent_class in parent_classes:
        try:
            if parent_class.qname() in parents:
                return True
            if node_is_subclass(parent_class, *parents):
                return True
        except AttributeError:
            # inferred() can yield objects without qname() (e.g. Uninferable)
            continue
    return False
def is_factory_post_generation_method(node):
    """Checks that node is a method decorated with ``@factory.post_generation``."""
    if not node.decorators:
        return False
    for decorator in node.decorators.get_children():
        try:
            inferred = decorator.inferred()
        except InferenceError:
            # cannot resolve this decorator - move on to the next one
            continue
        for target in inferred:
            if target.qname() == 'factory.helpers.post_generation':
                return True
    return False
def is_model_mpttmeta_subclass(node):
    """Checks that node is derivative of MPTTMeta class."""
    # must literally be a nested class named 'MPTTMeta'
    if node.name != 'MPTTMeta':
        return False
    if not isinstance(node.parent, ClassDef):
        return False
    mptt_owners = (
        'django.db.models.base.Model',
        '.Model',  # for the transformed version used in this plugin
        'django.forms.forms.Form',
        '.Form',
        'django.forms.models.ModelForm',
        '.ModelForm',
    )
    return node_is_subclass(node.parent, *mptt_owners)
def _attribute_is_magic(node, attrs, parents):
    """Checks that node is an attribute used inside one of allowed parents.

    ``attrs`` is the collection of "magic" attribute names; ``parents`` lists
    qualified class names on which such attributes legitimately exist.
    """
    if node.attrname not in attrs:
        return False
    # nothing to infer a type from if the attribute access has no child expr
    if not node.last_child():
        return False
    try:
        for cls in node.last_child().inferred():
            if isinstance(cls, Super):
                # for super() proxies, test against the class being proxied
                cls = cls._self_class  # pylint: disable=protected-access
            if node_is_subclass(cls, *parents) or cls.qname() in parents:
                return True
    except InferenceError:
        # inference failed - fall through and report not-magic
        pass
    return False
def is_style_attribute(node):
    """Checks that node is a palette attribute of the management color Style."""
    return _attribute_is_magic(
        node, STYLE_ATTRS, ('django.core.management.color.Style',))


def is_manager_attribute(node):
    """Checks that node is attribute of Manager or QuerySet class."""
    return _attribute_is_magic(
        node, MANAGER_ATTRS.union(QS_ATTRS),
        ('django.db.models.manager.Manager',
         '.Manager',
         'factory.base.BaseFactory.build',
         'django.db.models.query.QuerySet',
         '.QuerySet'))


def is_admin_attribute(node):
    """Checks that node is attribute of BaseModelAdmin."""
    return _attribute_is_magic(
        node, MODELADMIN_ATTRS,
        ('django.contrib.admin.options.BaseModelAdmin', '.BaseModelAdmin'))


def is_model_attribute(node):
    """Checks that node is attribute of Model."""
    return _attribute_is_magic(
        node, MODEL_ATTRS,
        ('django.db.models.base.Model', '.Model'))


def is_field_attribute(node):
    """Checks that node is attribute of Field."""
    return _attribute_is_magic(
        node, FIELD_ATTRS,
        ('django.db.models.fields.Field', '.Field'))


def is_charfield_attribute(node):
    """Checks that node is attribute of CharField."""
    return _attribute_is_magic(
        node, CHAR_FIELD_ATTRS,
        ('django.db.models.fields.CharField', '.CharField'))


def is_datefield_attribute(node):
    """Checks that node is attribute of DateField."""
    return _attribute_is_magic(
        node, DATE_FIELD_ATTRS,
        ('django.db.models.fields.DateField', '.DateField'))


def is_decimalfield_attribute(node):
    """Checks that node is attribute of DecimalField."""
    return _attribute_is_magic(
        node, DECIMAL_FIELD_ATTRS,
        ('django.db.models.fields.DecimalField', '.DecimalField'))


def is_filefield_attribute(node):
    """Checks that node is attribute of FileField."""
    return _attribute_is_magic(
        node, FILE_FIELD_ATTRS,
        ('django.db.models.fields.files.FileField', '.FileField'))


def is_imagefield_attribute(node):
    """Checks that node is attribute of ImageField."""
    return _attribute_is_magic(
        node, IMAGE_FIELD_ATTRS,
        ('django.db.models.fields.files.ImageField', '.ImageField'))


def is_ipfield_attribute(node):
    """Checks that node is attribute of GenericIPAddressField."""
    return _attribute_is_magic(
        node, IP_FIELD_ATTRS,
        ('django.db.models.fields.GenericIPAddressField', '.GenericIPAddressField'))


def is_slugfield_attribute(node):
    """Checks that node is attribute of SlugField."""
    return _attribute_is_magic(
        node, SLUG_FIELD_ATTRS,
        ('django.db.models.fields.SlugField', '.SlugField'))


def is_foreignkeyfield_attribute(node):
    """Checks that node is attribute of ForeignKey."""
    return _attribute_is_magic(
        node, FOREIGNKEY_FIELD_ATTRS,
        ('django.db.models.fields.related.ForeignKey', '.ForeignKey'))


def is_manytomanyfield_attribute(node):
    """Checks that node is attribute of ManyToManyField."""
    return _attribute_is_magic(
        node, MANYTOMANY_FIELD_ATTRS,
        ('django.db.models.fields.related.ManyToManyField', '.ManyToManyField'))


def is_onetoonefield_attribute(node):
    """Checks that node is attribute of OneToOneField."""
    return _attribute_is_magic(
        node, ONETOONE_FIELD_ATTRS,
        ('django.db.models.fields.related.OneToOneField', '.OneToOneField'))


def is_form_attribute(node):
    """Checks that node is attribute of Form."""
    return _attribute_is_magic(
        node, FORM_ATTRS,
        ('django.forms.forms.Form', 'django.forms.models.ModelForm'))
def is_model_test_case_subclass(node):
    """Checks that node is derivative of TestCase class."""
    # NOTE(review): sibling checkers (e.g. is_model_admin_subclass) combine
    # these conditions with ``or``; the ``and`` here means a class is only
    # rejected when it BOTH lacks the 'Test' suffix AND is not nested.
    # Looks inconsistent - confirm intent before changing.
    if not node.name.endswith('Test') and not isinstance(node.parent, ClassDef):
        return False
    return node_is_subclass(node, 'django.test.testcases.TestCase')
def generic_is_view_attribute(parents, attrs):
    """Generates is_X_attribute function for given parents and attrs."""
    def checker(node):
        """Checks that node is a magic attribute belonging to *parents*."""
        return _attribute_is_magic(node, attrs, parents)
    return checker
def is_model_view_subclass_method_shouldnt_be_function(node):
    """Checks that node is get or post method of the View class."""
    if node.name not in ('get', 'post'):
        return False
    # climb to the nearest enclosing class definition, if any
    enclosing = node.parent
    while enclosing and not isinstance(enclosing, ScopedClass):
        enclosing = enclosing.parent
    view_bases = ('django.views.View',
                  'django.views.generic.View',
                  'django.views.generic.base.View',)
    return enclosing is not None and node_is_subclass(enclosing, *view_bases)
def is_model_view_subclass_unused_argument(node):
    """
    Checks that node is get or post method of the View class and it has valid arguments.
    TODO: Bad checkings, need to be more smart.
    """
    return (is_model_view_subclass_method_shouldnt_be_function(node)
            and is_argument_named_request(node))
def is_argument_named_request(node):
    """If an unused-argument is named 'request' ignore that!"""
    arg_names = node.argnames()
    return 'request' in arg_names
def is_model_field_display_method(node):
    """Accept model's fields with get_*_display names."""
    # Django presumably auto-generates get_<field>_display() for fields with
    # choices, which is why the name pattern alone is accepted here.
    if not node.attrname.endswith('_display'):
        return False
    if not node.attrname.startswith('get_'):
        return False
    if node.last_child():
        # TODO: could validate the names of the fields on the model rather than
        # blindly accepting get_*_display
        try:
            for cls in node.last_child().inferred():
                if node_is_subclass(cls, 'django.db.models.base.Model', '.Model'):
                    return True
        except InferenceError:
            return False
    return False
def is_model_media_valid_attributes(node):
    """Suppress warnings for valid attributes of Media class."""
    if node.name not in ('js', ):
        return False
    # climb to the nearest enclosing class definition, if any
    enclosing = node.parent
    while enclosing and not isinstance(enclosing, ScopedClass):
        enclosing = enclosing.parent
    return enclosing is not None and enclosing.name == "Media"
def is_templatetags_module_valid_constant(node):
    """Suppress warnings for valid constants in templatetags module."""
    if node.name not in ('register', ):
        return False
    # climb to the containing module node
    module = node.parent
    while not isinstance(module, Module):
        module = module.parent
    return "templatetags." in module.name
def is_urls_module_valid_constant(node):
    """Suppress warnings for valid constants in urls module."""
    if node.name not in ('urlpatterns', 'app_name'):
        return False
    # climb to the containing module node
    module = node.parent
    while not isinstance(module, Module):
        module = module.parent
    return module.name.endswith('urls')
def allow_meta_protected_access(node):
    """Permit ``._meta`` attribute access on Django 1.8+ (older versions never allow it)."""
    return django_version >= (1, 8) and node.attrname == '_meta'
def is_class(class_name):
    """Shortcut for node_is_subclass."""
    def checker(node):
        return node_is_subclass(node, class_name)
    return checker
def wrap(orig_method, with_method):
    """Return a wrapper that calls *with_method* with *orig_method* prepended to the arguments."""
    def wrapped(*args, **kwargs):
        with_method(orig_method, *args, **kwargs)
    return wrapped
def is_wsgi_application(node):
    """Checks that node is the module-level ``application`` object of a WSGI module."""
    frame = node.frame()
    # matched if the enclosing module is named 'wsgi' or lives in a wsgi.py file
    # (both path[0] and file are consulted - presumably covers differing astroid
    # versions; verify which applies before simplifying)
    return node.name == 'application' and isinstance(frame, Module) and \
        (frame.name == 'wsgi' or frame.path[0].endswith('wsgi.py') or frame.file.endswith('wsgi.py'))
# Compat helpers
def pylint_newstyle_classdef_compat(linter, warning_name, augment):
    """Suppress *warning_name* on visit_classdef when this pylint version exposes it."""
    visit = getattr(NewStyleConflictChecker, 'visit_classdef', None)
    if visit is None:
        # older pylint without the visit_classdef hook - nothing to do
        return
    suppress_message(linter, visit, warning_name, augment)
# augment things
def apply_augmentations(linter):
    """Apply augmentation and suppression rules.

    Registers every predicate defined in this module against the relevant
    pylint checker visit/leave hooks via pylint_plugin_utils.
    """
    # foreign-key conveniences: X_set querysets and implicit <fk>_id attributes
    augment_visit(linter, TypeChecker.visit_attribute, foreign_key_sets)
    augment_visit(linter, TypeChecker.visit_attribute, foreign_key_ids)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_field_display_method)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_style_attribute)
    suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_urls_module_valid_constant)
    # suppress errors when accessing magical class attributes
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manager_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_admin_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_field_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_charfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_datefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_decimalfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_filefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_imagefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_ipfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_slugfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_foreignkeyfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manytomanyfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_onetoonefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_form_attribute)
    # one generated suppressor per class-based-view mixin and its attributes
    for parents, attrs in VIEW_ATTRS:
        suppress_message(linter, TypeChecker.visit_attribute, 'no-member', generic_is_view_attribute(parents, attrs))
    # formviews have too many ancestors, there's nothing the user of the library can do about that
    suppress_message(linter, MisdesignChecker.visit_classdef, 'too-many-ancestors',
                     is_class('django.views.generic.edit.FormView'))
    # model forms have no __init__ method anywhere in their bases
    suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_class('django.forms.models.ModelForm'))
    # Meta
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_meta_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_meta_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_meta_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_meta_subclass)
    suppress_message(linter, ClassChecker.visit_attribute, 'protected-access', allow_meta_protected_access)
    # Media
    suppress_message(linter, NameChecker.visit_assignname, 'C0103', is_model_media_valid_attributes)
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_media_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_media_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_media_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_media_subclass)
    # Admin
    # Too many public methods (40+/20)
    # TODO: Count public methods of django.contrib.admin.options.ModelAdmin and increase
    # MisdesignChecker.config.max_public_methods to this value to count only user' methods.
    # nb_public_methods = 0
    # for method in node.methods():
    #     if not method.name.startswith('_'):
    #         nb_public_methods += 1
    suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_admin_subclass)
    # Tests
    suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_test_case_subclass)
    # View
    # Method could be a function (get, post)
    suppress_message(linter, ClassChecker.leave_functiondef, 'no-self-use',
                     is_model_view_subclass_method_shouldnt_be_function)
    # Unused argument 'request' (get, post)
    suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument',
                     is_model_view_subclass_unused_argument)
    suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument', is_argument_named_request)
    # django-mptt
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_mpttmeta_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_mpttmeta_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_model_mpttmeta_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_mpttmeta_subclass)
    # factory_boy's DjangoModelFactory
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_factory)
    suppress_message(linter, ClassChecker.visit_functiondef, 'no-self-argument', is_factory_post_generation_method)
    # ForeignKey and OneToOneField
    # Must update this in a thread safe way to support the parallel option on pylint (-j)
    current_leave_module = VariablesChecker.leave_module
    if current_leave_module.__name__ == 'leave_module':
        # current_leave_module is not wrapped
        # Two threads may hit the next assignment concurrently, but the result is the same
        VariablesChecker.leave_module = wrap(current_leave_module, ignore_import_warnings_for_related_fields)
        # VariablesChecker.leave_module is now wrapped
    # else VariablesChecker.leave_module is already wrapped
    # wsgi.py
    suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_wsgi_application)
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
is_model_admin_subclass
|
python
|
def is_model_admin_subclass(node):
if node.name[-5:] != 'Admin' or isinstance(node.parent, ClassDef):
return False
return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin')
|
Checks that node is derivative of ModelAdmin class.
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L382-L387
|
[
"def node_is_subclass(cls, *subclass_names):\n \"\"\"Checks if cls node has parent with subclass_name.\"\"\"\n if not isinstance(cls, (ClassDef, Instance)):\n return False\n\n if cls.bases == Uninferable:\n return False\n for base_cls in cls.bases:\n try:\n for inf in base_cls.inferred():\n if inf.qname() in subclass_names:\n return True\n if inf != cls and node_is_subclass(inf, *subclass_names):\n # check up the hierarchy in case we are a subclass of\n # a subclass of a subclass ...\n return True\n except InferenceError:\n continue\n\n return False\n"
] |
"""Augmentations."""
# pylint: disable=invalid-name
import itertools
from astroid import InferenceError
from astroid.objects import Super
from astroid.nodes import ClassDef, ImportFrom, Attribute
from astroid.scoped_nodes import ClassDef as ScopedClass, Module
from pylint.checkers.base import DocStringChecker, NameChecker
from pylint.checkers.design_analysis import MisdesignChecker
from pylint.checkers.classes import ClassChecker
from pylint.checkers.newstyle import NewStyleConflictChecker
from pylint.checkers.variables import VariablesChecker
from pylint.checkers.typecheck import TypeChecker
from pylint.checkers.variables import ScopeConsumer
from pylint_plugin_utils import augment_visit, suppress_message
from django import VERSION as django_version
from django.views.generic.base import View, RedirectView, ContextMixin
from django.views.generic.dates import DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin
from django.views.generic.detail import SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin
from django.views.generic.edit import DeletionMixin, FormMixin, ModelFormMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
from django.utils import termcolors
from pylint_django.utils import node_is_subclass, PY3
# Note: it would have been nice to import the Manager object from Django and
# get its attributes that way - and this used to be the method - but unfortunately
# there's no guarantee that Django is properly configured at that stage, and importing
# anything from the django.db package causes an ImproperlyConfigured exception.
# Therefore we'll fall back on a hard-coded list of attributes which won't be as accurate,
# but this is not 100% accurate anyway.
MANAGER_ATTRS = {
'none',
'all',
'count',
'dates',
'distinct',
'extra',
'get',
'get_or_create',
'get_queryset',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'latest',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
}
QS_ATTRS = {
'filter',
'exclude',
'annotate',
'order_by',
'reverse',
'distinct',
'values',
'values_list',
'dates',
'datetimes',
'none',
'all',
'select_related',
'prefetch_related',
'extra',
'defer',
'only',
'using',
'select_for_update',
'raw',
'get',
'create',
'get_or_create',
'update_or_create',
'bulk_create',
'count',
'in_bulk',
'iterator',
'latest',
'earliest',
'first',
'last',
'aggregate',
'exists',
'update',
'delete',
'as_manager',
'expression',
'output_field',
}
MODELADMIN_ATTRS = {
# options
'actions',
'actions_on_top',
'actions_on_bottom',
'actions_selection_counter',
'date_hierarchy',
'empty_value_display',
'exclude',
'fields',
'fieldsets',
'filter_horizontal',
'filter_vertical',
'form',
'formfield_overrides',
'inlines',
'list_display',
'list_display_links',
'list_editable',
'list_filter',
'list_max_show_all',
'list_per_page',
'list_select_related',
'ordering',
'paginator',
'prepopulated_fields',
'preserve_filters',
'radio_fields',
'raw_id_fields',
'readonly_fields',
'save_as',
'save_on_top',
'search_fields',
'show_full_result_count',
'view_on_site',
# template options
'add_form_template',
'change_form_template',
'change_list_template',
'delete_confirmation_template',
'delete_selected_confirmation_template',
'object_history_template',
}
MODEL_ATTRS = {
'id',
'DoesNotExist',
'MultipleObjectsReturned',
'_base_manager',
'_default_manager',
'_meta',
'delete',
'get_next_by_date',
'get_previous_by_date',
'objects',
'save',
}
FIELD_ATTRS = {
'null',
'blank',
'choices',
'db_column',
'db_index',
'db_tablespace',
'default',
'editable',
'error_messages',
'help_text',
'primary_key',
'unique',
'unique_for_date',
'unique_for_month',
'unique_for_year',
'verbose_name',
'validators',
}
CHAR_FIELD_ATTRS = {
'max_length',
}
DATE_FIELD_ATTRS = {
'auto_now',
'auto_now_add',
}
DECIMAL_FIELD_ATTRS = {
'max_digits',
'decimal_places',
}
FILE_FIELD_ATTRS = {
'upload_to',
'storage',
}
IMAGE_FIELD_ATTRS = {
'height_field',
'width_field',
}
IP_FIELD_ATTRS = {
'protocol',
'unpack_ipv4',
}
SLUG_FIELD_ATTRS = {
'allow_unicode',
}
FOREIGNKEY_FIELD_ATTRS = {
'limit_choices_to',
'related_name',
'related_query_name',
'to_field',
'db_constraint',
'swappable',
}
MANYTOMANY_FIELD_ATTRS = {
'add',
'clear',
'related_name',
'related_query_name',
'limit_choices_to',
'symmetrical',
'through',
'through_fields',
'db_table',
'db_constraint',
'swappable',
}
ONETOONE_FIELD_ATTRS = {
'parent_link',
}
STYLE_ATTRS = set(itertools.chain.from_iterable(termcolors.PALETTES.values()))
VIEW_ATTRS = {
(
(
'{}.{}'.format(cls.__module__, cls.__name__),
'.{}'.format(cls.__name__)
),
tuple(cls.__dict__.keys())
) for cls in (
View, RedirectView, ContextMixin,
DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin,
SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin,
DeletionMixin, FormMixin, ModelFormMixin,
MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)
}
FORM_ATTRS = {
'declared_fields',
}
def ignore_import_warnings_for_related_fields(orig_method, self, node):
"""
Replaces the leave_module method on the VariablesChecker class to
prevent unused-import warnings which are caused by the ForeignKey
and OneToOneField transformations. By replacing the nodes in the
AST with their type rather than the django field, imports of the
form 'from django.db.models import OneToOneField' raise an unused-import
warning
"""
consumer = self._to_consume[0] # pylint: disable=W0212
# we can disable this warning ('Access to a protected member _to_consume of a client class')
# as it's not actually a client class, but rather, this method is being monkey patched
# onto the class and so the access is valid
new_things = {}
iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems
for name, stmts in iterat():
if isinstance(stmts[0], ImportFrom):
if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
continue
new_things[name] = stmts
consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type) # pylint: disable=W0212
self._to_consume = [consumer] # pylint: disable=W0212
return orig_method(self, node)
def foreign_key_sets(chain, node):
    """
    When a Django model has a ForeignKey to another model, the target
    of the foreign key gets a '<modelname>_set' attribute for accessing
    a queryset of the model owning the foreign key - eg:

        class ModelA(models.Model):
            pass

        class ModelB(models.Model):
            a = models.ForeignKey(ModelA)

    Now, ModelA instances will have a modelb_set attribute.
    It's also possible to explicitly name the relationship using the related_name argument
    to the ForeignKey constructor. As it's impossible to know this without inspecting all
    models before processing, we'll instead do a "best guess" approach and see if the attribute
    being accessed goes on to be used as a queryset. This is via 'duck typing': if the method
    called on the attribute being accessed is something we might find in a queryset, we'll
    warn.
    """
    quack = False
    if node.attrname in MANAGER_ATTRS or node.attrname.endswith('_set'):
        # if this is a X_set method, that's a pretty strong signal that this is the default
        # Django name, rather than one set by related_name
        quack = True
    else:
        # duck-type: the attribute is itself the target of a manager-style
        # method call (e.g. obj.things.filter(...)), so treat it as a queryset
        if isinstance(node.parent, Attribute):
            func_name = getattr(node.parent, 'attrname', None)
            if func_name in MANAGER_ATTRS:
                quack = True
    if quack:
        children = list(node.get_children())
        for child in children:
            try:
                inferred_cls = child.inferred()
            except InferenceError:
                # can't infer this child's type - try the next one
                pass
            else:
                for cls in inferred_cls:
                    if (node_is_subclass(cls,
                                         'django.db.models.manager.Manager',
                                         'django.db.models.base.Model',
                                         '.Model',
                                         'django.db.models.fields.related.ForeignObject')):
                        # This means that we are looking at a subclass of models.Model
                        # and something is trying to access a <something>_set attribute.
                        # Since this could exist, we will return so as not to raise an
                        # error.
                        return
    chain()
def foreign_key_ids(chain, node):
if node.attrname.endswith('_id'):
return
chain()
def is_model_media_subclass(node):
"""Checks that node is derivative of Media class."""
if node.name != 'Media' or not isinstance(node.parent, ClassDef):
return False
parents = ('django.contrib.admin.options.ModelAdmin',
'django.forms.widgets.Media',
'django.db.models.base.Model',
'.Model', # for the transformed version used in this plugin
'django.forms.forms.Form',
'.Form',
'django.forms.widgets.Widget',
'.Widget',
'django.forms.models.ModelForm',
'.ModelForm')
return node_is_subclass(node.parent, *parents)
def is_model_meta_subclass(node):
"""Checks that node is derivative of Meta class."""
if node.name != 'Meta' or not isinstance(node.parent, ClassDef):
return False
parents = ('.Model', # for the transformed version used here
'django.db.models.base.Model',
'.Form',
'django.forms.forms.Form',
'.ModelForm',
'django.forms.models.ModelForm',
'rest_framework.serializers.BaseSerializer',
'rest_framework.generics.GenericAPIView',
'rest_framework.viewsets.ReadOnlyModelViewSet',
'rest_framework.viewsets.ModelViewSet',
'django_filters.filterset.FilterSet',
'factory.django.DjangoModelFactory',)
return node_is_subclass(node.parent, *parents)
def is_model_factory(node):
"""Checks that node is derivative of DjangoModelFactory or SubFactory class."""
try:
parent_classes = node.expr.inferred()
except: # noqa: E722, pylint: disable=bare-except
return False
parents = ('factory.declarations.LazyFunction',
'factory.declarations.SubFactory',
'factory.django.DjangoModelFactory')
for parent_class in parent_classes:
try:
if parent_class.qname() in parents:
return True
if node_is_subclass(parent_class, *parents):
return True
except AttributeError:
continue
return False
def is_factory_post_generation_method(node):
    """Return True for methods decorated with ``@factory.post_generation``."""
    decorators = node.decorators
    if not decorators:
        return False
    for dec in decorators.get_children():
        try:
            inferred = dec.inferred()
        except InferenceError:
            continue
        if any(target.qname() == 'factory.helpers.post_generation'
               for target in inferred):
            return True
    return False


def is_model_mpttmeta_subclass(node):
    """Return True when *node* is an ``MPTTMeta`` class nested in a model-like class."""
    if node.name != 'MPTTMeta':
        return False
    if not isinstance(node.parent, ClassDef):
        return False
    return node_is_subclass(
        node.parent,
        'django.db.models.base.Model',
        '.Model',  # the transformed version used in this plugin
        'django.forms.forms.Form',
        '.Form',
        'django.forms.models.ModelForm',
        '.ModelForm',
    )
def _attribute_is_magic(node, attrs, parents):
    """Checks that node is an attribute used inside one of allowed parents.

    :param node: an astroid Attribute node (``something.attrname``).
    :param attrs: collection of attribute names considered "magic".
    :param parents: qualified class names on which those attributes are legal.
    :returns: True when ``node.attrname`` is one of *attrs* and the object it
        is accessed on infers to one of *parents* (or a subclass thereof).
    """
    if node.attrname not in attrs:
        return False
    if not node.last_child():
        return False
    try:
        for cls in node.last_child().inferred():
            # a super() proxy exposes the proxied class via _self_class
            if isinstance(cls, Super):
                cls = cls._self_class  # pylint: disable=protected-access
            if node_is_subclass(cls, *parents) or cls.qname() in parents:
                return True
    except InferenceError:
        # inference failed: we cannot prove the access is magic, so fall through
        pass
    return False
def is_style_attribute(node):
    """True for palette attribute access on a management-command Style."""
    return _attribute_is_magic(node, STYLE_ATTRS,
                               ('django.core.management.color.Style',))


def is_manager_attribute(node):
    """True for magic attribute access on a Manager or QuerySet."""
    return _attribute_is_magic(
        node, MANAGER_ATTRS.union(QS_ATTRS),
        ('django.db.models.manager.Manager', '.Manager',
         'factory.base.BaseFactory.build',
         'django.db.models.query.QuerySet', '.QuerySet'))


def is_admin_attribute(node):
    """True for magic attribute access on a BaseModelAdmin."""
    return _attribute_is_magic(
        node, MODELADMIN_ATTRS,
        ('django.contrib.admin.options.BaseModelAdmin', '.BaseModelAdmin'))


def is_model_attribute(node):
    """True for magic attribute access on a Model."""
    return _attribute_is_magic(
        node, MODEL_ATTRS, ('django.db.models.base.Model', '.Model'))


def is_field_attribute(node):
    """True for magic attribute access on a Field."""
    return _attribute_is_magic(
        node, FIELD_ATTRS, ('django.db.models.fields.Field', '.Field'))


def is_charfield_attribute(node):
    """True for magic attribute access on a CharField."""
    return _attribute_is_magic(
        node, CHAR_FIELD_ATTRS,
        ('django.db.models.fields.CharField', '.CharField'))


def is_datefield_attribute(node):
    """True for magic attribute access on a DateField."""
    return _attribute_is_magic(
        node, DATE_FIELD_ATTRS,
        ('django.db.models.fields.DateField', '.DateField'))


def is_decimalfield_attribute(node):
    """True for magic attribute access on a DecimalField."""
    return _attribute_is_magic(
        node, DECIMAL_FIELD_ATTRS,
        ('django.db.models.fields.DecimalField', '.DecimalField'))


def is_filefield_attribute(node):
    """True for magic attribute access on a FileField."""
    return _attribute_is_magic(
        node, FILE_FIELD_ATTRS,
        ('django.db.models.fields.files.FileField', '.FileField'))


def is_imagefield_attribute(node):
    """True for magic attribute access on an ImageField."""
    return _attribute_is_magic(
        node, IMAGE_FIELD_ATTRS,
        ('django.db.models.fields.files.ImageField', '.ImageField'))


def is_ipfield_attribute(node):
    """True for magic attribute access on a GenericIPAddressField."""
    return _attribute_is_magic(
        node, IP_FIELD_ATTRS,
        ('django.db.models.fields.GenericIPAddressField',
         '.GenericIPAddressField'))


def is_slugfield_attribute(node):
    """True for magic attribute access on a SlugField."""
    return _attribute_is_magic(
        node, SLUG_FIELD_ATTRS,
        ('django.db.models.fields.SlugField', '.SlugField'))


def is_foreignkeyfield_attribute(node):
    """True for magic attribute access on a ForeignKey."""
    return _attribute_is_magic(
        node, FOREIGNKEY_FIELD_ATTRS,
        ('django.db.models.fields.related.ForeignKey', '.ForeignKey'))


def is_manytomanyfield_attribute(node):
    """True for magic attribute access on a ManyToManyField."""
    return _attribute_is_magic(
        node, MANYTOMANY_FIELD_ATTRS,
        ('django.db.models.fields.related.ManyToManyField',
         '.ManyToManyField'))


def is_onetoonefield_attribute(node):
    """True for magic attribute access on a OneToOneField."""
    return _attribute_is_magic(
        node, ONETOONE_FIELD_ATTRS,
        ('django.db.models.fields.related.OneToOneField', '.OneToOneField'))


def is_form_attribute(node):
    """True for magic attribute access on a Form or ModelForm."""
    return _attribute_is_magic(
        node, FORM_ATTRS,
        ('django.forms.forms.Form', 'django.forms.models.ModelForm'))
def is_model_test_case_subclass(node):
    """Checks that node is derivative of TestCase class."""
    # NOTE(review): the `and` below means a class whose name does NOT end in
    # 'Test' still reaches the subclass check whenever it is nested inside
    # another class; compare is_model_admin_subclass, which uses `or`.
    # Possibly intentional (or historic) -- confirm before "fixing".
    if not node.name.endswith('Test') and not isinstance(node.parent, ClassDef):
        return False
    return node_is_subclass(node, 'django.test.testcases.TestCase')
def generic_is_view_attribute(parents, attrs):
    """Build a predicate matching the given magic view *attrs* on *parents*."""
    return lambda node: _attribute_is_magic(node, attrs, parents)


def is_model_view_subclass_method_shouldnt_be_function(node):
    """Return True for ``get``/``post`` methods defined on a View subclass."""
    if node.name not in ('get', 'post'):
        return False
    enclosing = node.parent
    while enclosing is not None and not isinstance(enclosing, ScopedClass):
        enclosing = enclosing.parent
    if enclosing is None:
        return False
    return node_is_subclass(enclosing,
                            'django.views.View',
                            'django.views.generic.View',
                            'django.views.generic.base.View')
def is_model_view_subclass_unused_argument(node):
    """
    Return True for get/post View methods with an acceptable unused argument.

    TODO: the heuristic is crude and should be smarter.
    """
    return (is_model_view_subclass_method_shouldnt_be_function(node)
            and is_argument_named_request(node))


def is_argument_named_request(node):
    """Ignore unused-argument warnings for an argument literally named ``request``."""
    args = node.argnames()
    return 'request' in args
def is_model_field_display_method(node):
    """Accept ``get_<field>_display`` attribute access on Model instances."""
    attrname = node.attrname
    if not (attrname.startswith('get_') and attrname.endswith('_display')):
        return False
    target = node.last_child()
    if not target:
        return False
    # TODO: could validate the field names against the model instead of
    # accepting any get_*_display pattern
    try:
        for cls in target.inferred():
            if node_is_subclass(cls, 'django.db.models.base.Model', '.Model'):
                return True
    except InferenceError:
        return False
    return False
def is_model_media_valid_attributes(node):
    """Suppress warnings for valid attributes of a ``Media`` class."""
    if node.name not in ('js', ):
        return False
    klass = node.parent
    while klass is not None and not isinstance(klass, ScopedClass):
        klass = klass.parent
    return klass is not None and klass.name == "Media"


def is_templatetags_module_valid_constant(node):
    """Suppress warnings for valid constants in a templatetags module."""
    if node.name not in ('register', ):
        return False
    module = node.parent
    while not isinstance(module, Module):
        module = module.parent
    return "templatetags." in module.name


def is_urls_module_valid_constant(node):
    """Suppress warnings for valid constants in a urls module."""
    if node.name not in ('urlpatterns', 'app_name'):
        return False
    module = node.parent
    while not isinstance(module, Module):
        module = module.parent
    return module.name.endswith('urls')
def allow_meta_protected_access(node):
    """Permit protected access to ``_meta`` on Django >= 1.8 (public API there)."""
    return django_version >= (1, 8) and node.attrname == '_meta'


def is_class(class_name):
    """Shortcut for node_is_subclass."""
    def check(node):
        return node_is_subclass(node, class_name)
    return check
def wrap(orig_method, with_method):
    """Return a function delegating to ``with_method(orig_method, *args, **kwargs)``.

    Used to monkey-patch checker methods: the wrapper hands the original
    method plus the call arguments to *with_method*.  The wrapped call's
    return value is propagated (previously it was silently dropped), so
    wrapping a value-returning method is now transparent.

    Deliberately does NOT use functools.wraps: callers detect an
    already-wrapped method by checking ``__name__`` (see the
    ``leave_module`` patching in apply_augmentations).
    """
    def wrap_func(*args, **kwargs):
        return with_method(orig_method, *args, **kwargs)
    return wrap_func
def is_wsgi_application(node):
    """Suppress invalid-name for the conventional WSGI ``application`` object.

    True when *node* is named ``application`` and sits at module level of a
    module that looks like a wsgi entry point (module named ``wsgi``, or
    whose path/file ends in ``wsgi.py``).
    """
    frame = node.frame()
    # NOTE(review): relies on frame.path being a non-empty list and frame.file
    # being a string -- confirm both hold across the supported astroid range.
    return node.name == 'application' and isinstance(frame, Module) and \
        (frame.name == 'wsgi' or frame.path[0].endswith('wsgi.py') or frame.file.endswith('wsgi.py'))
# Compat helpers
def pylint_newstyle_classdef_compat(linter, warning_name, augment):
    """Apply *augment* to visit_classdef when this pylint version provides it."""
    visit = getattr(NewStyleConflictChecker, 'visit_classdef', None)
    if visit is None:
        return
    suppress_message(linter, visit, warning_name, augment)
# augment things
def apply_augmentations(linter):
    """Apply augmentation and suppression rules.

    Called once at plugin registration.  Wires the predicates defined above
    into pylint via pylint_plugin_utils:

    * ``augment_visit`` runs our handler around a checker visit so specific
      false positives (reverse FK accessors, implicit ``*_id``) are swallowed.
    * ``suppress_message`` disables one message id whenever the given
      predicate matches the visited node.
    """
    # reverse relation accessors ('<model>_set') and implicit '<fk>_id'
    augment_visit(linter, TypeChecker.visit_attribute, foreign_key_sets)
    augment_visit(linter, TypeChecker.visit_attribute, foreign_key_ids)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_field_display_method)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_style_attribute)
    suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_urls_module_valid_constant)
    # suppress errors when accessing magical class attributes
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manager_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_admin_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_field_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_charfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_datefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_decimalfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_filefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_imagefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_ipfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_slugfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_foreignkeyfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manytomanyfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_onetoonefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_form_attribute)
    for parents, attrs in VIEW_ATTRS:
        suppress_message(linter, TypeChecker.visit_attribute, 'no-member', generic_is_view_attribute(parents, attrs))
    # formviews have too many ancestors, there's nothing the user of the library can do about that
    suppress_message(linter, MisdesignChecker.visit_classdef, 'too-many-ancestors',
                     is_class('django.views.generic.edit.FormView'))
    # model forms have no __init__ method anywhere in their bases
    suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_class('django.forms.models.ModelForm'))
    # Meta
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_meta_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_meta_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_meta_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_meta_subclass)
    suppress_message(linter, ClassChecker.visit_attribute, 'protected-access', allow_meta_protected_access)
    # Media
    suppress_message(linter, NameChecker.visit_assignname, 'C0103', is_model_media_valid_attributes)
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_media_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_media_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_media_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_media_subclass)
    # Admin
    # Too many public methods (40+/20)
    # TODO: count the public methods of django.contrib.admin.options.ModelAdmin
    # and raise MisdesignChecker.config.max_public_methods by that amount so
    # only the user's own methods are counted, e.g.:
    #   nb_public_methods = 0
    #   for method in node.methods():
    #       if not method.name.startswith('_'):
    #           nb_public_methods += 1
    suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_admin_subclass)
    # Tests
    suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_test_case_subclass)
    # View
    # Method could be a function (get, post)
    suppress_message(linter, ClassChecker.leave_functiondef, 'no-self-use',
                     is_model_view_subclass_method_shouldnt_be_function)
    # Unused argument 'request' (get, post)
    suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument',
                     is_model_view_subclass_unused_argument)
    suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument', is_argument_named_request)
    # django-mptt
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_mpttmeta_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_mpttmeta_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_model_mpttmeta_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_mpttmeta_subclass)
    # factory_boy's DjangoModelFactory
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_factory)
    suppress_message(linter, ClassChecker.visit_functiondef, 'no-self-argument', is_factory_post_generation_method)
    # ForeignKey and OneToOneField
    # Must update this in a thread safe way to support the parallel option on pylint (-j)
    current_leave_module = VariablesChecker.leave_module
    if current_leave_module.__name__ == 'leave_module':
        # current_leave_module is not wrapped
        # Two threads may hit the next assignment concurrently, but the result is the same
        VariablesChecker.leave_module = wrap(current_leave_module, ignore_import_warnings_for_related_fields)
        # VariablesChecker.leave_module is now wrapped
    # else VariablesChecker.leave_module is already wrapped
    # wsgi.py
    suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_wsgi_application)
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
is_model_factory
|
python
|
def is_model_factory(node):
try:
parent_classes = node.expr.inferred()
except: # noqa: E722, pylint: disable=bare-except
return False
parents = ('factory.declarations.LazyFunction',
'factory.declarations.SubFactory',
'factory.django.DjangoModelFactory')
for parent_class in parent_classes:
try:
if parent_class.qname() in parents:
return True
if node_is_subclass(parent_class, *parents):
return True
except AttributeError:
continue
return False
|
Checks that node is derivative of DjangoModelFactory or SubFactory class.
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L428-L449
|
[
"def node_is_subclass(cls, *subclass_names):\n \"\"\"Checks if cls node has parent with subclass_name.\"\"\"\n if not isinstance(cls, (ClassDef, Instance)):\n return False\n\n if cls.bases == Uninferable:\n return False\n for base_cls in cls.bases:\n try:\n for inf in base_cls.inferred():\n if inf.qname() in subclass_names:\n return True\n if inf != cls and node_is_subclass(inf, *subclass_names):\n # check up the hierarchy in case we are a subclass of\n # a subclass of a subclass ...\n return True\n except InferenceError:\n continue\n\n return False\n"
] |
"""Augmentations."""
# pylint: disable=invalid-name
import itertools
from astroid import InferenceError
from astroid.objects import Super
from astroid.nodes import ClassDef, ImportFrom, Attribute
from astroid.scoped_nodes import ClassDef as ScopedClass, Module
from pylint.checkers.base import DocStringChecker, NameChecker
from pylint.checkers.design_analysis import MisdesignChecker
from pylint.checkers.classes import ClassChecker
from pylint.checkers.newstyle import NewStyleConflictChecker
from pylint.checkers.variables import VariablesChecker
from pylint.checkers.typecheck import TypeChecker
from pylint.checkers.variables import ScopeConsumer
from pylint_plugin_utils import augment_visit, suppress_message
from django import VERSION as django_version
from django.views.generic.base import View, RedirectView, ContextMixin
from django.views.generic.dates import DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin
from django.views.generic.detail import SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin
from django.views.generic.edit import DeletionMixin, FormMixin, ModelFormMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
from django.utils import termcolors
from pylint_django.utils import node_is_subclass, PY3
# Note: it would have been nice to import the Manager object from Django and
# get its attributes that way - and this used to be the method - but unfortunately
# there's no guarantee that Django is properly configured at that stage, and importing
# anything from the django.db package causes an ImproperlyConfigured exception.
# Therefore we'll fall back on a hard-coded list of attributes which won't be as accurate,
# but this is not 100% accurate anyway.
MANAGER_ATTRS = {
'none',
'all',
'count',
'dates',
'distinct',
'extra',
'get',
'get_or_create',
'get_queryset',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'latest',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
}
QS_ATTRS = {
'filter',
'exclude',
'annotate',
'order_by',
'reverse',
'distinct',
'values',
'values_list',
'dates',
'datetimes',
'none',
'all',
'select_related',
'prefetch_related',
'extra',
'defer',
'only',
'using',
'select_for_update',
'raw',
'get',
'create',
'get_or_create',
'update_or_create',
'bulk_create',
'count',
'in_bulk',
'iterator',
'latest',
'earliest',
'first',
'last',
'aggregate',
'exists',
'update',
'delete',
'as_manager',
'expression',
'output_field',
}
MODELADMIN_ATTRS = {
# options
'actions',
'actions_on_top',
'actions_on_bottom',
'actions_selection_counter',
'date_hierarchy',
'empty_value_display',
'exclude',
'fields',
'fieldsets',
'filter_horizontal',
'filter_vertical',
'form',
'formfield_overrides',
'inlines',
'list_display',
'list_display_links',
'list_editable',
'list_filter',
'list_max_show_all',
'list_per_page',
'list_select_related',
'ordering',
'paginator',
'prepopulated_fields',
'preserve_filters',
'radio_fields',
'raw_id_fields',
'readonly_fields',
'save_as',
'save_on_top',
'search_fields',
'show_full_result_count',
'view_on_site',
# template options
'add_form_template',
'change_form_template',
'change_list_template',
'delete_confirmation_template',
'delete_selected_confirmation_template',
'object_history_template',
}
MODEL_ATTRS = {
'id',
'DoesNotExist',
'MultipleObjectsReturned',
'_base_manager',
'_default_manager',
'_meta',
'delete',
'get_next_by_date',
'get_previous_by_date',
'objects',
'save',
}
FIELD_ATTRS = {
'null',
'blank',
'choices',
'db_column',
'db_index',
'db_tablespace',
'default',
'editable',
'error_messages',
'help_text',
'primary_key',
'unique',
'unique_for_date',
'unique_for_month',
'unique_for_year',
'verbose_name',
'validators',
}
CHAR_FIELD_ATTRS = {
'max_length',
}
DATE_FIELD_ATTRS = {
'auto_now',
'auto_now_add',
}
DECIMAL_FIELD_ATTRS = {
'max_digits',
'decimal_places',
}
FILE_FIELD_ATTRS = {
'upload_to',
'storage',
}
IMAGE_FIELD_ATTRS = {
'height_field',
'width_field',
}
IP_FIELD_ATTRS = {
'protocol',
'unpack_ipv4',
}
SLUG_FIELD_ATTRS = {
'allow_unicode',
}
FOREIGNKEY_FIELD_ATTRS = {
'limit_choices_to',
'related_name',
'related_query_name',
'to_field',
'db_constraint',
'swappable',
}
MANYTOMANY_FIELD_ATTRS = {
'add',
'clear',
'related_name',
'related_query_name',
'limit_choices_to',
'symmetrical',
'through',
'through_fields',
'db_table',
'db_constraint',
'swappable',
}
ONETOONE_FIELD_ATTRS = {
'parent_link',
}
STYLE_ATTRS = set(itertools.chain.from_iterable(termcolors.PALETTES.values()))
VIEW_ATTRS = {
(
(
'{}.{}'.format(cls.__module__, cls.__name__),
'.{}'.format(cls.__name__)
),
tuple(cls.__dict__.keys())
) for cls in (
View, RedirectView, ContextMixin,
DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin,
SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin,
DeletionMixin, FormMixin, ModelFormMixin,
MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)
}
FORM_ATTRS = {
'declared_fields',
}
def ignore_import_warnings_for_related_fields(orig_method, self, node):
    """
    Replaces the leave_module method on the VariablesChecker class to
    prevent unused-import warnings which are caused by the ForeignKey
    and OneToOneField transformations. By replacing the nodes in the
    AST with their type rather than the django field, imports of the
    form 'from django.db.models import OneToOneField' raise an unused-import
    warning

    :param orig_method: the original, unwrapped VariablesChecker.leave_module.
    :param self: the VariablesChecker instance (this function is monkey
        patched onto the class via ``wrap``).
    :param node: the Module node being left.
    """
    consumer = self._to_consume[0]  # pylint: disable=W0212
    # we can disable this warning ('Access to a protected member _to_consume of a client class')
    # as it's not actually a client class, but rather, this method is being monkey patched
    # onto the class and so the access is valid
    new_things = {}
    iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems
    for name, stmts in iterat():
        if isinstance(stmts[0], ImportFrom):
            # drop names imported via "from ... import ForeignKey/OneToOneField":
            # leaving them out of the consumer avoids the spurious unused-import
            if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
                continue
        new_things[name] = stmts
    # NOTE(review): assumes ScopeConsumer instances accept assignment of
    # _atomic -- confirm this against the supported pylint version range.
    consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type)  # pylint: disable=W0212
    self._to_consume = [consumer]  # pylint: disable=W0212
    return orig_method(self, node)
def foreign_key_sets(chain, node):
    """
    Swallow no-member warnings for reverse ForeignKey accessors.

    A ForeignKey from ModelB to ModelA gives ModelA instances a
    ``modelb_set`` attribute (or a name chosen via ``related_name``, which
    cannot be known statically).  Heuristic: treat the access as a possible
    reverse accessor when it is a default-style ``*_set`` name, a known
    manager attribute, or is itself immediately used like a manager /
    queryset.  If, additionally, the object it is accessed on infers to a
    model, manager or foreign object, return without chaining so no
    warning is emitted; otherwise delegate to the wrapped visit.
    """
    attr = node.attrname
    if attr in MANAGER_ATTRS or attr.endswith('_set'):
        # a *_set name is a strong signal of the default reverse accessor
        duck_typed = True
    else:
        duck_typed = (isinstance(node.parent, Attribute) and
                      getattr(node.parent, 'attrname', None) in MANAGER_ATTRS)
    if duck_typed:
        for child in node.get_children():
            try:
                possible_classes = child.inferred()
            except InferenceError:
                continue
            for cls in possible_classes:
                if node_is_subclass(cls,
                                    'django.db.models.manager.Manager',
                                    'django.db.models.base.Model',
                                    '.Model',
                                    'django.db.models.fields.related.ForeignObject'):
                    # plausible reverse accessor on a model subclass --
                    # suppress rather than risk a false positive
                    return
    chain()
def foreign_key_ids(chain, node):
    """Accept the implicit ``<fk>_id`` attribute generated for ForeignKey fields."""
    if not node.attrname.endswith('_id'):
        chain()
def is_model_admin_subclass(node):
    """Return True when *node* is a top-level class deriving from ModelAdmin."""
    if not node.name.endswith('Admin'):
        return False
    if isinstance(node.parent, ClassDef):
        # nested admin classes are not treated as ModelAdmin definitions
        return False
    return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin')
def is_model_media_subclass(node):
    """True for a ``Media`` class defined inside a model-like class."""
    nested_in_class = isinstance(node.parent, ClassDef)
    if node.name != 'Media' or not nested_in_class:
        return False
    allowed = ('django.contrib.admin.options.ModelAdmin',
               'django.forms.widgets.Media',
               'django.db.models.base.Model',
               '.Model',  # transformed version used in this plugin
               'django.forms.forms.Form',
               '.Form',
               'django.forms.widgets.Widget',
               '.Widget',
               'django.forms.models.ModelForm',
               '.ModelForm')
    return node_is_subclass(node.parent, *allowed)


def is_model_meta_subclass(node):
    """True for a ``Meta`` class defined inside a model-like class."""
    if node.name != 'Meta' or not isinstance(node.parent, ClassDef):
        return False
    allowed = ('.Model',  # transformed version used in this plugin
               'django.db.models.base.Model',
               '.Form',
               'django.forms.forms.Form',
               '.ModelForm',
               'django.forms.models.ModelForm',
               'rest_framework.serializers.BaseSerializer',
               'rest_framework.generics.GenericAPIView',
               'rest_framework.viewsets.ReadOnlyModelViewSet',
               'rest_framework.viewsets.ModelViewSet',
               'django_filters.filterset.FilterSet',
               'factory.django.DjangoModelFactory')
    return node_is_subclass(node.parent, *allowed)


def is_factory_post_generation_method(node):
    """True for a method carrying the ``@factory.post_generation`` decorator."""
    if not node.decorators:
        return False
    for candidate in node.decorators.get_children():
        try:
            resolved = candidate.inferred()
        except InferenceError:
            continue
        for target in resolved:
            if target.qname() == 'factory.helpers.post_generation':
                return True
    return False


def is_model_mpttmeta_subclass(node):
    """True for an ``MPTTMeta`` class defined inside a model-like class."""
    if node.name != 'MPTTMeta' or not isinstance(node.parent, ClassDef):
        return False
    allowed = ('django.db.models.base.Model',
               '.Model',  # transformed version used in this plugin
               'django.forms.forms.Form',
               '.Form',
               'django.forms.models.ModelForm',
               '.ModelForm')
    return node_is_subclass(node.parent, *allowed)
def _attribute_is_magic(node, attrs, parents):
    """Checks that node is an attribute used inside one of allowed parents.

    ``node`` is an astroid Attribute node; ``attrs`` is the set of attribute
    names to match; ``parents`` lists qualified class names the attribute may
    legally appear on.  Returns True only when the name matches AND the
    accessed object infers to one of *parents* or a subclass of one.
    """
    if node.attrname not in attrs:
        return False
    if not node.last_child():
        return False
    try:
        for cls in node.last_child().inferred():
            # unwrap super() proxies to the actual class being proxied
            if isinstance(cls, Super):
                cls = cls._self_class  # pylint: disable=protected-access
            if node_is_subclass(cls, *parents) or cls.qname() in parents:
                return True
    except InferenceError:
        # cannot infer -> cannot prove magic, treat as not magic
        pass
    return False
def is_style_attribute(node):
    """Magic-attribute check for management-command color Styles."""
    return _attribute_is_magic(node, STYLE_ATTRS, ('django.core.management.color.Style',))


def is_manager_attribute(node):
    """Magic-attribute check for Manager / QuerySet objects."""
    owners = ('django.db.models.manager.Manager', '.Manager',
              'factory.base.BaseFactory.build',
              'django.db.models.query.QuerySet', '.QuerySet')
    return _attribute_is_magic(node, MANAGER_ATTRS.union(QS_ATTRS), owners)


def is_admin_attribute(node):
    """Magic-attribute check for BaseModelAdmin objects."""
    owners = ('django.contrib.admin.options.BaseModelAdmin', '.BaseModelAdmin')
    return _attribute_is_magic(node, MODELADMIN_ATTRS, owners)


def is_model_attribute(node):
    """Magic-attribute check for Model objects."""
    return _attribute_is_magic(node, MODEL_ATTRS,
                               ('django.db.models.base.Model', '.Model'))


def is_field_attribute(node):
    """Magic-attribute check for Field objects."""
    return _attribute_is_magic(node, FIELD_ATTRS,
                               ('django.db.models.fields.Field', '.Field'))


def is_charfield_attribute(node):
    """Magic-attribute check for CharField objects."""
    return _attribute_is_magic(node, CHAR_FIELD_ATTRS,
                               ('django.db.models.fields.CharField', '.CharField'))


def is_datefield_attribute(node):
    """Magic-attribute check for DateField objects."""
    return _attribute_is_magic(node, DATE_FIELD_ATTRS,
                               ('django.db.models.fields.DateField', '.DateField'))


def is_decimalfield_attribute(node):
    """Magic-attribute check for DecimalField objects."""
    return _attribute_is_magic(node, DECIMAL_FIELD_ATTRS,
                               ('django.db.models.fields.DecimalField', '.DecimalField'))


def is_filefield_attribute(node):
    """Magic-attribute check for FileField objects."""
    return _attribute_is_magic(node, FILE_FIELD_ATTRS,
                               ('django.db.models.fields.files.FileField', '.FileField'))


def is_imagefield_attribute(node):
    """Magic-attribute check for ImageField objects."""
    return _attribute_is_magic(node, IMAGE_FIELD_ATTRS,
                               ('django.db.models.fields.files.ImageField', '.ImageField'))


def is_ipfield_attribute(node):
    """Magic-attribute check for GenericIPAddressField objects."""
    return _attribute_is_magic(node, IP_FIELD_ATTRS,
                               ('django.db.models.fields.GenericIPAddressField',
                                '.GenericIPAddressField'))


def is_slugfield_attribute(node):
    """Magic-attribute check for SlugField objects."""
    return _attribute_is_magic(node, SLUG_FIELD_ATTRS,
                               ('django.db.models.fields.SlugField', '.SlugField'))


def is_foreignkeyfield_attribute(node):
    """Magic-attribute check for ForeignKey objects."""
    return _attribute_is_magic(node, FOREIGNKEY_FIELD_ATTRS,
                               ('django.db.models.fields.related.ForeignKey', '.ForeignKey'))


def is_manytomanyfield_attribute(node):
    """Magic-attribute check for ManyToManyField objects."""
    return _attribute_is_magic(node, MANYTOMANY_FIELD_ATTRS,
                               ('django.db.models.fields.related.ManyToManyField',
                                '.ManyToManyField'))


def is_onetoonefield_attribute(node):
    """Magic-attribute check for OneToOneField objects."""
    return _attribute_is_magic(node, ONETOONE_FIELD_ATTRS,
                               ('django.db.models.fields.related.OneToOneField',
                                '.OneToOneField'))


def is_form_attribute(node):
    """Magic-attribute check for Form / ModelForm objects."""
    return _attribute_is_magic(node, FORM_ATTRS,
                               ('django.forms.forms.Form', 'django.forms.models.ModelForm'))
def is_model_test_case_subclass(node):
    """Checks that node is derivative of TestCase class."""
    # NOTE(review): `and` here (vs `or` in is_model_admin_subclass) lets a
    # nested class with any name fall through to the subclass check -- verify
    # whether that asymmetry is intended before altering it.
    if not node.name.endswith('Test') and not isinstance(node.parent, ClassDef):
        return False
    return node_is_subclass(node, 'django.test.testcases.TestCase')
def generic_is_view_attribute(parents, attrs):
    """Make a node predicate for the given view *attrs* / *parents* pair."""
    def _matches(node):
        return _attribute_is_magic(node, attrs, parents)
    return _matches


def is_model_view_subclass_method_shouldnt_be_function(node):
    """True when *node* is a ``get``/``post`` method on a View subclass."""
    if node.name not in ('get', 'post'):
        return False
    owner = node.parent
    while owner is not None and not isinstance(owner, ScopedClass):
        owner = owner.parent
    return owner is not None and node_is_subclass(owner,
                                                  'django.views.View',
                                                  'django.views.generic.View',
                                                  'django.views.generic.base.View')
def is_model_view_subclass_unused_argument(node):
    """
    True for get/post View methods whose unused arguments are acceptable.

    TODO: the heuristic is crude and needs to be smarter.
    """
    if not is_model_view_subclass_method_shouldnt_be_function(node):
        return False
    return is_argument_named_request(node)


def is_argument_named_request(node):
    """Ignore unused-argument when the argument is literally named ``request``."""
    return any(arg == 'request' for arg in node.argnames())
def is_model_field_display_method(node):
    """Accept model ``get_<field>_display`` attribute access."""
    name = node.attrname
    if not name.endswith('_display') or not name.startswith('get_'):
        return False
    # TODO: verify the field name against the model rather than accepting
    # every get_*_display pattern
    if node.last_child():
        try:
            for cls in node.last_child().inferred():
                if node_is_subclass(cls, 'django.db.models.base.Model', '.Model'):
                    return True
        except InferenceError:
            return False
    return False
def is_model_media_valid_attributes(node):
    """Suppress warnings for valid attributes of a Media class."""
    if node.name not in ('js', ):
        return False
    owner = node.parent
    while owner is not None and not isinstance(owner, ScopedClass):
        owner = owner.parent
    if owner is None:
        return False
    return owner.name == "Media"


def is_templatetags_module_valid_constant(node):
    """Suppress warnings for valid constants in a templatetags module."""
    if node.name not in ('register', ):
        return False
    owner = node.parent
    while not isinstance(owner, Module):
        owner = owner.parent
    return "templatetags." in owner.name


def is_urls_module_valid_constant(node):
    """Suppress warnings for valid constants in a urls module."""
    if node.name not in ('urlpatterns', 'app_name'):
        return False
    owner = node.parent
    while not isinstance(owner, Module):
        owner = owner.parent
    return owner.name.endswith('urls')
def allow_meta_protected_access(node):
    """Allow ``._meta`` access on Django >= 1.8, where it is public API."""
    if django_version < (1, 8):
        return False
    return node.attrname == '_meta'


def is_class(class_name):
    """Return a predicate testing subclass-ness against *class_name*."""
    def _predicate(node):
        return node_is_subclass(node, class_name)
    return _predicate
def wrap(orig_method, with_method):
def wrap_func(*args, **kwargs):
with_method(orig_method, *args, **kwargs)
return wrap_func
def is_wsgi_application(node):
frame = node.frame()
return node.name == 'application' and isinstance(frame, Module) and \
(frame.name == 'wsgi' or frame.path[0].endswith('wsgi.py') or frame.file.endswith('wsgi.py'))
# Compat helpers
def pylint_newstyle_classdef_compat(linter, warning_name, augment):
if not hasattr(NewStyleConflictChecker, 'visit_classdef'):
return
suppress_message(linter, getattr(NewStyleConflictChecker, 'visit_classdef'), warning_name, augment)
# augment things
def apply_augmentations(linter):
"""Apply augmentation and suppression rules."""
augment_visit(linter, TypeChecker.visit_attribute, foreign_key_sets)
augment_visit(linter, TypeChecker.visit_attribute, foreign_key_ids)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_field_display_method)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_style_attribute)
suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_urls_module_valid_constant)
# supress errors when accessing magical class attributes
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manager_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_admin_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_field_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_charfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_datefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_decimalfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_filefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_imagefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_ipfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_slugfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_foreignkeyfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manytomanyfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_onetoonefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_form_attribute)
for parents, attrs in VIEW_ATTRS:
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', generic_is_view_attribute(parents, attrs))
# formviews have too many ancestors, there's nothing the user of the library can do about that
suppress_message(linter, MisdesignChecker.visit_classdef, 'too-many-ancestors',
is_class('django.views.generic.edit.FormView'))
# model forms have no __init__ method anywhere in their bases
suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_class('django.forms.models.ModelForm'))
# Meta
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_meta_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_meta_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_meta_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_meta_subclass)
suppress_message(linter, ClassChecker.visit_attribute, 'protected-access', allow_meta_protected_access)
# Media
suppress_message(linter, NameChecker.visit_assignname, 'C0103', is_model_media_valid_attributes)
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_media_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_media_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_media_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_media_subclass)
# Admin
# Too many public methods (40+/20)
# TODO: Count public methods of django.contrib.admin.options.ModelAdmin and increase
# MisdesignChecker.config.max_public_methods to this value to count only user' methods.
# nb_public_methods = 0
# for method in node.methods():
# if not method.name.startswith('_'):
# nb_public_methods += 1
suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_admin_subclass)
# Tests
suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_test_case_subclass)
# View
# Method could be a function (get, post)
suppress_message(linter, ClassChecker.leave_functiondef, 'no-self-use',
is_model_view_subclass_method_shouldnt_be_function)
# Unused argument 'request' (get, post)
suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument',
is_model_view_subclass_unused_argument)
suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument', is_argument_named_request)
# django-mptt
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_mpttmeta_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_mpttmeta_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_model_mpttmeta_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_mpttmeta_subclass)
# factory_boy's DjangoModelFactory
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_factory)
suppress_message(linter, ClassChecker.visit_functiondef, 'no-self-argument', is_factory_post_generation_method)
# ForeignKey and OneToOneField
# Must update this in a thread safe way to support the parallel option on pylint (-j)
current_leave_module = VariablesChecker.leave_module
if current_leave_module.__name__ == 'leave_module':
# current_leave_module is not wrapped
# Two threads may hit the next assignment concurrently, but the result is the same
VariablesChecker.leave_module = wrap(current_leave_module, ignore_import_warnings_for_related_fields)
# VariablesChecker.leave_module is now wrapped
# else VariablesChecker.leave_module is already wrapped
# wsgi.py
suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_wsgi_application)
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
is_model_mpttmeta_subclass
|
python
|
def is_model_mpttmeta_subclass(node):
if node.name != 'MPTTMeta' or not isinstance(node.parent, ClassDef):
return False
parents = ('django.db.models.base.Model',
'.Model', # for the transformed version used in this plugin
'django.forms.forms.Form',
'.Form',
'django.forms.models.ModelForm',
'.ModelForm')
return node_is_subclass(node.parent, *parents)
|
Checks that node is derivative of MPTTMeta class.
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L469-L480
|
[
"def node_is_subclass(cls, *subclass_names):\n \"\"\"Checks if cls node has parent with subclass_name.\"\"\"\n if not isinstance(cls, (ClassDef, Instance)):\n return False\n\n if cls.bases == Uninferable:\n return False\n for base_cls in cls.bases:\n try:\n for inf in base_cls.inferred():\n if inf.qname() in subclass_names:\n return True\n if inf != cls and node_is_subclass(inf, *subclass_names):\n # check up the hierarchy in case we are a subclass of\n # a subclass of a subclass ...\n return True\n except InferenceError:\n continue\n\n return False\n"
] |
"""Augmentations."""
# pylint: disable=invalid-name
import itertools
from astroid import InferenceError
from astroid.objects import Super
from astroid.nodes import ClassDef, ImportFrom, Attribute
from astroid.scoped_nodes import ClassDef as ScopedClass, Module
from pylint.checkers.base import DocStringChecker, NameChecker
from pylint.checkers.design_analysis import MisdesignChecker
from pylint.checkers.classes import ClassChecker
from pylint.checkers.newstyle import NewStyleConflictChecker
from pylint.checkers.variables import VariablesChecker
from pylint.checkers.typecheck import TypeChecker
from pylint.checkers.variables import ScopeConsumer
from pylint_plugin_utils import augment_visit, suppress_message
from django import VERSION as django_version
from django.views.generic.base import View, RedirectView, ContextMixin
from django.views.generic.dates import DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin
from django.views.generic.detail import SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin
from django.views.generic.edit import DeletionMixin, FormMixin, ModelFormMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
from django.utils import termcolors
from pylint_django.utils import node_is_subclass, PY3
# Note: it would have been nice to import the Manager object from Django and
# get its attributes that way - and this used to be the method - but unfortunately
# there's no guarantee that Django is properly configured at that stage, and importing
# anything from the django.db package causes an ImproperlyConfigured exception.
# Therefore we'll fall back on a hard-coded list of attributes which won't be as accurate,
# but this is not 100% accurate anyway.
MANAGER_ATTRS = {
'none',
'all',
'count',
'dates',
'distinct',
'extra',
'get',
'get_or_create',
'get_queryset',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'latest',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
}
QS_ATTRS = {
'filter',
'exclude',
'annotate',
'order_by',
'reverse',
'distinct',
'values',
'values_list',
'dates',
'datetimes',
'none',
'all',
'select_related',
'prefetch_related',
'extra',
'defer',
'only',
'using',
'select_for_update',
'raw',
'get',
'create',
'get_or_create',
'update_or_create',
'bulk_create',
'count',
'in_bulk',
'iterator',
'latest',
'earliest',
'first',
'last',
'aggregate',
'exists',
'update',
'delete',
'as_manager',
'expression',
'output_field',
}
MODELADMIN_ATTRS = {
# options
'actions',
'actions_on_top',
'actions_on_bottom',
'actions_selection_counter',
'date_hierarchy',
'empty_value_display',
'exclude',
'fields',
'fieldsets',
'filter_horizontal',
'filter_vertical',
'form',
'formfield_overrides',
'inlines',
'list_display',
'list_display_links',
'list_editable',
'list_filter',
'list_max_show_all',
'list_per_page',
'list_select_related',
'ordering',
'paginator',
'prepopulated_fields',
'preserve_filters',
'radio_fields',
'raw_id_fields',
'readonly_fields',
'save_as',
'save_on_top',
'search_fields',
'show_full_result_count',
'view_on_site',
# template options
'add_form_template',
'change_form_template',
'change_list_template',
'delete_confirmation_template',
'delete_selected_confirmation_template',
'object_history_template',
}
MODEL_ATTRS = {
'id',
'DoesNotExist',
'MultipleObjectsReturned',
'_base_manager',
'_default_manager',
'_meta',
'delete',
'get_next_by_date',
'get_previous_by_date',
'objects',
'save',
}
FIELD_ATTRS = {
'null',
'blank',
'choices',
'db_column',
'db_index',
'db_tablespace',
'default',
'editable',
'error_messages',
'help_text',
'primary_key',
'unique',
'unique_for_date',
'unique_for_month',
'unique_for_year',
'verbose_name',
'validators',
}
CHAR_FIELD_ATTRS = {
'max_length',
}
DATE_FIELD_ATTRS = {
'auto_now',
'auto_now_add',
}
DECIMAL_FIELD_ATTRS = {
'max_digits',
'decimal_places',
}
FILE_FIELD_ATTRS = {
'upload_to',
'storage',
}
IMAGE_FIELD_ATTRS = {
'height_field',
'width_field',
}
IP_FIELD_ATTRS = {
'protocol',
'unpack_ipv4',
}
SLUG_FIELD_ATTRS = {
'allow_unicode',
}
FOREIGNKEY_FIELD_ATTRS = {
'limit_choices_to',
'related_name',
'related_query_name',
'to_field',
'db_constraint',
'swappable',
}
MANYTOMANY_FIELD_ATTRS = {
'add',
'clear',
'related_name',
'related_query_name',
'limit_choices_to',
'symmetrical',
'through',
'through_fields',
'db_table',
'db_constraint',
'swappable',
}
ONETOONE_FIELD_ATTRS = {
'parent_link',
}
STYLE_ATTRS = set(itertools.chain.from_iterable(termcolors.PALETTES.values()))
VIEW_ATTRS = {
(
(
'{}.{}'.format(cls.__module__, cls.__name__),
'.{}'.format(cls.__name__)
),
tuple(cls.__dict__.keys())
) for cls in (
View, RedirectView, ContextMixin,
DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin,
SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin,
DeletionMixin, FormMixin, ModelFormMixin,
MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)
}
FORM_ATTRS = {
'declared_fields',
}
def ignore_import_warnings_for_related_fields(orig_method, self, node):
    """
    Replaces the leave_module method on the VariablesChecker class to
    prevent unused-import warnings which are caused by the ForeignKey
    and OneToOneField transformations. By replacing the nodes in the
    AST with their type rather than the django field, imports of the
    form 'from django.db.models import OneToOneField' raise an unused-import
    warning.

    :param orig_method: the original ``VariablesChecker.leave_module``.
    :param self: the VariablesChecker instance (this function is patched in
        as a bound-method replacement via ``wrap``).
    :param node: the Module node being left.
    """
    consumer = self._to_consume[0] # pylint: disable=W0212
    # we can disable this warning ('Access to a protected member _to_consume of a client class')
    # as it's not actually a client class, but rather, this method is being monkey patched
    # onto the class and so the access is valid
    # Rebuild the name->statements map, dropping 'from ... import' entries
    # whose imported names are only the transformed field classes; those are
    # consumed by the transform itself and must not be reported as unused.
    new_things = {}
    iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems
    for name, stmts in iterat():
        if isinstance(stmts[0], ImportFrom):
            if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
                continue
        new_things[name] = stmts
    # NOTE(review): this assumes pylint's ScopeConsumer layout
    # (to_consume, consumed, scope_type) -- verify against the installed
    # pylint version when upgrading.
    consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type) # pylint: disable=W0212
    self._to_consume = [consumer] # pylint: disable=W0212
    return orig_method(self, node)
def foreign_key_sets(chain, node):
    """
    Suppress no-member on reverse ForeignKey accessors.

    When a Django model has a ForeignKey to another model, the target
    of the foreign key gets a '<modelname>_set' attribute for accessing
    a queryset of the model owning the foreign key - eg:

        class ModelA(models.Model):
            pass

        class ModelB(models.Model):
            a = models.ForeignKey(ModelA)

    Now, ModelA instances will have a modelb_set attribute.

    It's also possible to explicitly name the relationship using the
    related_name argument to the ForeignKey constructor. As it's impossible
    to know this without inspecting all models before processing, we instead
    do a "best guess" approach and see if the attribute being accessed goes
    on to be used as a queryset. This is via 'duck typing': if the method
    called on the attribute being accessed is something we might find in a
    queryset, we suppress the warning (by not calling ``chain()``).
    """
    quack = False
    if node.attrname in MANAGER_ATTRS or node.attrname.endswith('_set'):
        # if this is a X_set method, that's a pretty strong signal that this is the default
        # Django name, rather than one set by related_name
        quack = True
    else:
        # otherwise, look one level up: if the result of this attribute access
        # is immediately used like a manager/queryset, treat it as one
        if isinstance(node.parent, Attribute):
            func_name = getattr(node.parent, 'attrname', None)
            if func_name in MANAGER_ATTRS:
                quack = True
    if quack:
        children = list(node.get_children())
        for child in children:
            try:
                inferred_cls = child.inferred()
            except InferenceError:
                pass
            else:
                for cls in inferred_cls:
                    if (node_is_subclass(cls,
                                         'django.db.models.manager.Manager',
                                         'django.db.models.base.Model',
                                         '.Model',
                                         'django.db.models.fields.related.ForeignObject')):
                        # This means that we are looking at a subclass of models.Model
                        # and something is trying to access a <something>_set attribute.
                        # Since this could exist, we will return so as not to raise an
                        # error.
                        return
    # Not recognised as a reverse-accessor use: run the original checker.
    chain()
def foreign_key_ids(chain, node):
    """Skip the original check for ``<field>_id`` attribute accesses.

    Django adds an implicit ``_id`` attribute for every foreign key, so
    the wrapped checker only runs for other attribute names.
    """
    if not node.attrname.endswith('_id'):
        chain()
def is_model_admin_subclass(node):
    """Checks that node is derivative of ModelAdmin class.

    Only top-level classes whose name ends in ``Admin`` qualify; classes
    nested inside another class (``node.parent`` is a ClassDef) do not.
    """
    # str.endswith is the idiomatic (and equivalent) form of the previous
    # ``node.name[-5:] != 'Admin'`` slice comparison.
    if not node.name.endswith('Admin') or isinstance(node.parent, ClassDef):
        return False
    return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin')
def is_model_media_subclass(node):
    """Return True when *node* is a ``Media`` inner class of a Django class.

    The candidate must be named ``Media`` and be nested directly inside a
    class deriving from one of the admin/model/form/widget bases.
    """
    if node.name != 'Media':
        return False
    if not isinstance(node.parent, ClassDef):
        return False
    owner_bases = (
        'django.contrib.admin.options.ModelAdmin',
        'django.forms.widgets.Media',
        'django.db.models.base.Model',
        '.Model',  # transformed name used by this plugin
        'django.forms.forms.Form',
        '.Form',
        'django.forms.widgets.Widget',
        '.Widget',
        'django.forms.models.ModelForm',
        '.ModelForm',
    )
    return node_is_subclass(node.parent, *owner_bases)
def is_model_meta_subclass(node):
    """Return True when *node* is a ``Meta`` inner class of a supported class.

    Supported owners: Django models/forms, DRF serializers and viewsets,
    django-filter FilterSets and factory_boy DjangoModelFactory.
    """
    if node.name != 'Meta':
        return False
    if not isinstance(node.parent, ClassDef):
        return False
    owner_bases = (
        # the '.X' names match the transformed versions used by this plugin
        '.Model', 'django.db.models.base.Model',
        '.Form', 'django.forms.forms.Form',
        '.ModelForm', 'django.forms.models.ModelForm',
        'rest_framework.serializers.BaseSerializer',
        'rest_framework.generics.GenericAPIView',
        'rest_framework.viewsets.ReadOnlyModelViewSet',
        'rest_framework.viewsets.ModelViewSet',
        'django_filters.filterset.FilterSet',
        'factory.django.DjangoModelFactory',
    )
    return node_is_subclass(node.parent, *owner_bases)
def is_model_factory(node):
    """Checks that node is derivative of DjangoModelFactory or SubFactory class.

    *node* is an Attribute node; the inferred types of its expression are
    compared against the factory_boy base classes.
    """
    try:
        parent_classes = node.expr.inferred()
    except Exception:  # pylint: disable=broad-except
        # Inference can fail in many ways; treat any failure as "not a
        # factory" rather than crashing the checker.  (Previously a bare
        # ``except:``, which also swallowed SystemExit/KeyboardInterrupt.)
        return False
    parents = ('factory.declarations.LazyFunction',
               'factory.declarations.SubFactory',
               'factory.django.DjangoModelFactory')
    for parent_class in parent_classes:
        try:
            if parent_class.qname() in parents:
                return True
            if node_is_subclass(parent_class, *parents):
                return True
        except AttributeError:
            continue
    return False
def is_factory_post_generation_method(node):
    """Return True when *node* is decorated with ``factory.post_generation``."""
    decorators = node.decorators
    if not decorators:
        return False
    for dec in decorators.get_children():
        try:
            candidates = dec.inferred()
        except InferenceError:
            continue
        if any(target.qname() == 'factory.helpers.post_generation'
               for target in candidates):
            return True
    return False
def _attribute_is_magic(node, attrs, parents):
    """Checks that node is an attribute used inside one of allowed parents.

    :param node: the Attribute node being checked.
    :param attrs: collection of attribute names considered "magic".
    :param parents: qualified class names whose (sub)classes may
        legitimately carry those attributes.
    """
    if node.attrname not in attrs:
        return False
    if not node.last_child():
        return False
    try:
        for cls in node.last_child().inferred():
            if isinstance(cls, Super):
                # unwrap super() proxies so the subclass check below sees
                # the underlying class
                cls = cls._self_class # pylint: disable=protected-access
            if node_is_subclass(cls, *parents) or cls.qname() in parents:
                return True
    except InferenceError:
        pass
    return False
def is_style_attribute(node):
    """Checks that node is an attribute of the management color Style object."""
    return _attribute_is_magic(
        node, STYLE_ATTRS, ('django.core.management.color.Style', ))
def is_manager_attribute(node):
    """Checks that node is attribute of Manager or QuerySet class."""
    magic_names = MANAGER_ATTRS | QS_ATTRS
    owners = ('django.db.models.manager.Manager',
              '.Manager',
              'factory.base.BaseFactory.build',
              'django.db.models.query.QuerySet',
              '.QuerySet')
    return _attribute_is_magic(node, magic_names, owners)
def is_admin_attribute(node):
    """Checks that node is attribute of BaseModelAdmin."""
    return _attribute_is_magic(
        node, MODELADMIN_ATTRS,
        ('django.contrib.admin.options.BaseModelAdmin', '.BaseModelAdmin'))
def is_model_attribute(node):
    """Checks that node is attribute of Model."""
    return _attribute_is_magic(
        node, MODEL_ATTRS,
        ('django.db.models.base.Model', '.Model'))
def is_field_attribute(node):
    """Checks that node is attribute of Field."""
    return _attribute_is_magic(
        node, FIELD_ATTRS,
        ('django.db.models.fields.Field', '.Field'))
def is_charfield_attribute(node):
    """Checks that node is attribute of CharField."""
    return _attribute_is_magic(
        node, CHAR_FIELD_ATTRS,
        ('django.db.models.fields.CharField', '.CharField'))
def is_datefield_attribute(node):
    """Checks that node is attribute of DateField."""
    return _attribute_is_magic(
        node, DATE_FIELD_ATTRS,
        ('django.db.models.fields.DateField', '.DateField'))
def is_decimalfield_attribute(node):
    """Checks that node is attribute of DecimalField."""
    return _attribute_is_magic(
        node, DECIMAL_FIELD_ATTRS,
        ('django.db.models.fields.DecimalField', '.DecimalField'))
def is_filefield_attribute(node):
    """Checks that node is attribute of FileField."""
    return _attribute_is_magic(
        node, FILE_FIELD_ATTRS,
        ('django.db.models.fields.files.FileField', '.FileField'))
def is_imagefield_attribute(node):
    """Checks that node is attribute of ImageField."""
    return _attribute_is_magic(
        node, IMAGE_FIELD_ATTRS,
        ('django.db.models.fields.files.ImageField', '.ImageField'))
def is_ipfield_attribute(node):
    """Checks that node is attribute of GenericIPAddressField."""
    return _attribute_is_magic(
        node, IP_FIELD_ATTRS,
        ('django.db.models.fields.GenericIPAddressField', '.GenericIPAddressField'))
def is_slugfield_attribute(node):
    """Checks that node is attribute of SlugField."""
    return _attribute_is_magic(
        node, SLUG_FIELD_ATTRS,
        ('django.db.models.fields.SlugField', '.SlugField'))
def is_foreignkeyfield_attribute(node):
    """Checks that node is attribute of ForeignKey."""
    return _attribute_is_magic(
        node, FOREIGNKEY_FIELD_ATTRS,
        ('django.db.models.fields.related.ForeignKey', '.ForeignKey'))
def is_manytomanyfield_attribute(node):
    """Checks that node is attribute of ManyToManyField."""
    return _attribute_is_magic(
        node, MANYTOMANY_FIELD_ATTRS,
        ('django.db.models.fields.related.ManyToManyField', '.ManyToManyField'))
def is_onetoonefield_attribute(node):
    """Checks that node is attribute of OneToOneField."""
    return _attribute_is_magic(
        node, ONETOONE_FIELD_ATTRS,
        ('django.db.models.fields.related.OneToOneField', '.OneToOneField'))
def is_form_attribute(node):
    """Checks that node is attribute of Form."""
    return _attribute_is_magic(
        node, FORM_ATTRS,
        ('django.forms.forms.Form', 'django.forms.models.ModelForm'))
def is_model_test_case_subclass(node):
    """Checks that node is derivative of TestCase class."""
    # NOTE(review): sibling helpers (e.g. is_model_admin_subclass) combine the
    # name check with ``or isinstance(node.parent, ClassDef)``; here ``and``
    # means a nested class with any name still reaches node_is_subclass --
    # confirm this asymmetry is intended before changing it.
    if not node.name.endswith('Test') and not isinstance(node.parent, ClassDef):
        return False
    return node_is_subclass(node, 'django.test.testcases.TestCase')
def generic_is_view_attribute(parents, attrs):
    """Build a checker matching the magic view *attrs* for the given *parents*."""
    return lambda node: _attribute_is_magic(node, attrs, parents)
def is_model_view_subclass_method_shouldnt_be_function(node):
    """Checks that *node* is a ``get``/``post`` method on a View subclass."""
    if node.name not in ('get', 'post'):
        return False
    # walk up to the closest enclosing class definition, if any
    owner = node.parent
    while owner and not isinstance(owner, ScopedClass):
        owner = owner.parent
    view_bases = ('django.views.View',
                  'django.views.generic.View',
                  'django.views.generic.base.View')
    return owner is not None and node_is_subclass(owner, *view_bases)
def is_model_view_subclass_unused_argument(node):
    """
    Checks that node is get or post method of the View class and it has valid
    arguments.

    TODO: Bad checkings, need to be more smart.
    """
    return (is_model_view_subclass_method_shouldnt_be_function(node)
            and is_argument_named_request(node))
def is_argument_named_request(node):
    """Ignore unused-argument warnings for a parameter named ``request``."""
    names = node.argnames()
    return 'request' in names
def is_model_field_display_method(node):
    """Accept model's fields with ``get_*_display`` accessor names."""
    attr = node.attrname
    if not (attr.startswith('get_') and attr.endswith('_display')):
        return False
    owner = node.last_child()
    if not owner:
        return False
    # TODO: could validate the names of the fields on the model rather than
    # blindly accepting get_*_display
    try:
        for cls in owner.inferred():
            if node_is_subclass(cls, 'django.db.models.base.Model', '.Model'):
                return True
    except InferenceError:
        return False
    return False
def is_model_media_valid_attributes(node):
    """Suppress warnings for valid attributes of a Media class."""
    if node.name not in ('js', ):
        return False
    owner = node.parent
    while owner and not isinstance(owner, ScopedClass):
        owner = owner.parent
    return owner is not None and owner.name == "Media"
def is_templatetags_module_valid_constant(node):
    """Suppress warnings for valid constants in a templatetags module."""
    if node.name not in ('register', ):
        return False
    module = node.parent
    while not isinstance(module, Module):
        module = module.parent
    return "templatetags." in module.name
def is_urls_module_valid_constant(node):
    """Suppress warnings for valid constants in a urls module."""
    if node.name not in ('urlpatterns', 'app_name'):
        return False
    module = node.parent
    while not isinstance(module, Module):
        module = module.parent
    return module.name.endswith('urls')
def allow_meta_protected_access(node):
    """Allow accessing ``_meta`` on Django >= 1.8 despite the underscore."""
    return django_version >= (1, 8) and node.attrname == '_meta'
def is_class(class_name):
    """Shortcut for node_is_subclass."""
    def checker(node):
        return node_is_subclass(node, class_name)
    return checker
def wrap(orig_method, with_method):
    """Return a wrapper calling ``with_method(orig_method, *args, **kwargs)``.

    The wrapped call's return value is propagated (previously it was
    silently discarded and the wrapper always returned None).

    Note: deliberately no ``functools.wraps`` here -- callers detect
    whether a method is already wrapped by inspecting ``__name__``.
    """
    def wrap_func(*args, **kwargs):
        return with_method(orig_method, *args, **kwargs)
    return wrap_func
def is_wsgi_application(node):
    """Allow the ``application`` name when assigned in a wsgi module."""
    frame = node.frame()
    if node.name != 'application' or not isinstance(frame, Module):
        return False
    return (frame.name == 'wsgi'
            or frame.path[0].endswith('wsgi.py')
            or frame.file.endswith('wsgi.py'))
# Compat helpers
def pylint_newstyle_classdef_compat(linter, warning_name, augment):
    """Register *augment* only on pylint versions exposing ``visit_classdef``."""
    visit = getattr(NewStyleConflictChecker, 'visit_classdef', None)
    if visit is None:
        return
    suppress_message(linter, visit, warning_name, augment)
# augment things
def apply_augmentations(linter):
    """Apply augmentation and suppression rules.

    Wires every predicate in this module into pylint via
    pylint_plugin_utils so Django-specific false positives (no-member on
    ORM attributes, missing docstrings on Meta classes, etc.) are silenced
    only where the corresponding predicate matches.
    """
    augment_visit(linter, TypeChecker.visit_attribute, foreign_key_sets)
    augment_visit(linter, TypeChecker.visit_attribute, foreign_key_ids)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_field_display_method)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_style_attribute)
    suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_urls_module_valid_constant)
    # suppress errors when accessing magical class attributes
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manager_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_admin_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_field_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_charfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_datefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_decimalfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_filefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_imagefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_ipfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_slugfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_foreignkeyfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manytomanyfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_onetoonefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_form_attribute)
    for parents, attrs in VIEW_ATTRS:
        suppress_message(linter, TypeChecker.visit_attribute, 'no-member', generic_is_view_attribute(parents, attrs))
    # formviews have too many ancestors, there's nothing the user of the library can do about that
    suppress_message(linter, MisdesignChecker.visit_classdef, 'too-many-ancestors',
                     is_class('django.views.generic.edit.FormView'))
    # model forms have no __init__ method anywhere in their bases
    suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_class('django.forms.models.ModelForm'))
    # Meta
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_meta_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_meta_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_meta_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_meta_subclass)
    suppress_message(linter, ClassChecker.visit_attribute, 'protected-access', allow_meta_protected_access)
    # Media
    suppress_message(linter, NameChecker.visit_assignname, 'C0103', is_model_media_valid_attributes)
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_media_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_media_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_media_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_media_subclass)
    # Admin
    # Too many public methods (40+/20)
    # TODO: Count public methods of django.contrib.admin.options.ModelAdmin and increase
    # MisdesignChecker.config.max_public_methods to this value to count only user' methods.
    # nb_public_methods = 0
    # for method in node.methods():
    #     if not method.name.startswith('_'):
    #         nb_public_methods += 1
    suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_admin_subclass)
    # Tests
    suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_test_case_subclass)
    # View
    # Method could be a function (get, post)
    suppress_message(linter, ClassChecker.leave_functiondef, 'no-self-use',
                     is_model_view_subclass_method_shouldnt_be_function)
    # Unused argument 'request' (get, post)
    suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument',
                     is_model_view_subclass_unused_argument)
    suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument', is_argument_named_request)
    # django-mptt
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_mpttmeta_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_mpttmeta_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_model_mpttmeta_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_mpttmeta_subclass)
    # factory_boy's DjangoModelFactory
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_factory)
    suppress_message(linter, ClassChecker.visit_functiondef, 'no-self-argument', is_factory_post_generation_method)
    # ForeignKey and OneToOneField
    # Must update this in a thread safe way to support the parallel option on pylint (-j)
    current_leave_module = VariablesChecker.leave_module
    if current_leave_module.__name__ == 'leave_module':
        # current_leave_module is not wrapped
        # Two threads may hit the next assignment concurrently, but the result is the same
        VariablesChecker.leave_module = wrap(current_leave_module, ignore_import_warnings_for_related_fields)
        # VariablesChecker.leave_module is now wrapped
    # else VariablesChecker.leave_module is already wrapped
    # wsgi.py
    suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_wsgi_application)
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
_attribute_is_magic
|
python
|
def _attribute_is_magic(node, attrs, parents):
if node.attrname not in attrs:
return False
if not node.last_child():
return False
try:
for cls in node.last_child().inferred():
if isinstance(cls, Super):
cls = cls._self_class # pylint: disable=protected-access
if node_is_subclass(cls, *parents) or cls.qname() in parents:
return True
except InferenceError:
pass
return False
|
Checks that node is an attribute used inside one of allowed parents
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L483-L498
| null |
"""Augmentations."""
# pylint: disable=invalid-name
import itertools
from astroid import InferenceError
from astroid.objects import Super
from astroid.nodes import ClassDef, ImportFrom, Attribute
from astroid.scoped_nodes import ClassDef as ScopedClass, Module
from pylint.checkers.base import DocStringChecker, NameChecker
from pylint.checkers.design_analysis import MisdesignChecker
from pylint.checkers.classes import ClassChecker
from pylint.checkers.newstyle import NewStyleConflictChecker
from pylint.checkers.variables import VariablesChecker
from pylint.checkers.typecheck import TypeChecker
from pylint.checkers.variables import ScopeConsumer
from pylint_plugin_utils import augment_visit, suppress_message
from django import VERSION as django_version
from django.views.generic.base import View, RedirectView, ContextMixin
from django.views.generic.dates import DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin
from django.views.generic.detail import SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin
from django.views.generic.edit import DeletionMixin, FormMixin, ModelFormMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
from django.utils import termcolors
from pylint_django.utils import node_is_subclass, PY3
# Note: it would have been nice to import the Manager object from Django and
# get its attributes that way - and this used to be the method - but unfortunately
# there's no guarantee that Django is properly configured at that stage, and importing
# anything from the django.db package causes an ImproperlyConfigured exception.
# Therefore we'll fall back on a hard-coded list of attributes which won't be as accurate,
# but this is not 100% accurate anyway.
MANAGER_ATTRS = {
'none',
'all',
'count',
'dates',
'distinct',
'extra',
'get',
'get_or_create',
'get_queryset',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'latest',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
}
QS_ATTRS = {
'filter',
'exclude',
'annotate',
'order_by',
'reverse',
'distinct',
'values',
'values_list',
'dates',
'datetimes',
'none',
'all',
'select_related',
'prefetch_related',
'extra',
'defer',
'only',
'using',
'select_for_update',
'raw',
'get',
'create',
'get_or_create',
'update_or_create',
'bulk_create',
'count',
'in_bulk',
'iterator',
'latest',
'earliest',
'first',
'last',
'aggregate',
'exists',
'update',
'delete',
'as_manager',
'expression',
'output_field',
}
MODELADMIN_ATTRS = {
# options
'actions',
'actions_on_top',
'actions_on_bottom',
'actions_selection_counter',
'date_hierarchy',
'empty_value_display',
'exclude',
'fields',
'fieldsets',
'filter_horizontal',
'filter_vertical',
'form',
'formfield_overrides',
'inlines',
'list_display',
'list_display_links',
'list_editable',
'list_filter',
'list_max_show_all',
'list_per_page',
'list_select_related',
'ordering',
'paginator',
'prepopulated_fields',
'preserve_filters',
'radio_fields',
'raw_id_fields',
'readonly_fields',
'save_as',
'save_on_top',
'search_fields',
'show_full_result_count',
'view_on_site',
# template options
'add_form_template',
'change_form_template',
'change_list_template',
'delete_confirmation_template',
'delete_selected_confirmation_template',
'object_history_template',
}
MODEL_ATTRS = {
'id',
'DoesNotExist',
'MultipleObjectsReturned',
'_base_manager',
'_default_manager',
'_meta',
'delete',
'get_next_by_date',
'get_previous_by_date',
'objects',
'save',
}
FIELD_ATTRS = {
'null',
'blank',
'choices',
'db_column',
'db_index',
'db_tablespace',
'default',
'editable',
'error_messages',
'help_text',
'primary_key',
'unique',
'unique_for_date',
'unique_for_month',
'unique_for_year',
'verbose_name',
'validators',
}
CHAR_FIELD_ATTRS = {
'max_length',
}
DATE_FIELD_ATTRS = {
'auto_now',
'auto_now_add',
}
DECIMAL_FIELD_ATTRS = {
'max_digits',
'decimal_places',
}
FILE_FIELD_ATTRS = {
'upload_to',
'storage',
}
IMAGE_FIELD_ATTRS = {
'height_field',
'width_field',
}
IP_FIELD_ATTRS = {
'protocol',
'unpack_ipv4',
}
SLUG_FIELD_ATTRS = {
'allow_unicode',
}
FOREIGNKEY_FIELD_ATTRS = {
'limit_choices_to',
'related_name',
'related_query_name',
'to_field',
'db_constraint',
'swappable',
}
MANYTOMANY_FIELD_ATTRS = {
'add',
'clear',
'related_name',
'related_query_name',
'limit_choices_to',
'symmetrical',
'through',
'through_fields',
'db_table',
'db_constraint',
'swappable',
}
ONETOONE_FIELD_ATTRS = {
'parent_link',
}
STYLE_ATTRS = set(itertools.chain.from_iterable(termcolors.PALETTES.values()))
VIEW_ATTRS = {
(
(
'{}.{}'.format(cls.__module__, cls.__name__),
'.{}'.format(cls.__name__)
),
tuple(cls.__dict__.keys())
) for cls in (
View, RedirectView, ContextMixin,
DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin,
SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin,
DeletionMixin, FormMixin, ModelFormMixin,
MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)
}
FORM_ATTRS = {
'declared_fields',
}
def ignore_import_warnings_for_related_fields(orig_method, self, node):
"""
Replaces the leave_module method on the VariablesChecker class to
prevent unused-import warnings which are caused by the ForeignKey
and OneToOneField transformations. By replacing the nodes in the
AST with their type rather than the django field, imports of the
form 'from django.db.models import OneToOneField' raise an unused-import
warning
"""
consumer = self._to_consume[0] # pylint: disable=W0212
# we can disable this warning ('Access to a protected member _to_consume of a client class')
# as it's not actually a client class, but rather, this method is being monkey patched
# onto the class and so the access is valid
new_things = {}
iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems
for name, stmts in iterat():
if isinstance(stmts[0], ImportFrom):
if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
continue
new_things[name] = stmts
consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type) # pylint: disable=W0212
self._to_consume = [consumer] # pylint: disable=W0212
return orig_method(self, node)
def foreign_key_sets(chain, node):
"""
When a Django model has a ForeignKey to another model, the target
of the foreign key gets a '<modelname>_set' attribute for accessing
a queryset of the model owning the foreign key - eg:
class ModelA(models.Model):
pass
class ModelB(models.Model):
a = models.ForeignKey(ModelA)
Now, ModelA instances will have a modelb_set attribute.
It's also possible to explicitly name the relationship using the related_name argument
to the ForeignKey constructor. As it's impossible to know this without inspecting all
models before processing, we'll instead do a "best guess" approach and see if the attribute
being accessed goes on to be used as a queryset. This is via 'duck typing': if the method
called on the attribute being accessed is something we might find in a queryset, we'll
warn.
"""
quack = False
if node.attrname in MANAGER_ATTRS or node.attrname.endswith('_set'):
# if this is a X_set method, that's a pretty strong signal that this is the default
# Django name, rather than one set by related_name
quack = True
else:
# we will
if isinstance(node.parent, Attribute):
func_name = getattr(node.parent, 'attrname', None)
if func_name in MANAGER_ATTRS:
quack = True
if quack:
children = list(node.get_children())
for child in children:
try:
inferred_cls = child.inferred()
except InferenceError:
pass
else:
for cls in inferred_cls:
if (node_is_subclass(cls,
'django.db.models.manager.Manager',
'django.db.models.base.Model',
'.Model',
'django.db.models.fields.related.ForeignObject')):
# This means that we are looking at a subclass of models.Model
# and something is trying to access a <something>_set attribute.
# Since this could exist, we will return so as not to raise an
# error.
return
chain()
def foreign_key_ids(chain, node):
if node.attrname.endswith('_id'):
return
chain()
def is_model_admin_subclass(node):
"""Checks that node is derivative of ModelAdmin class."""
if node.name[-5:] != 'Admin' or isinstance(node.parent, ClassDef):
return False
return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin')
def is_model_media_subclass(node):
"""Checks that node is derivative of Media class."""
if node.name != 'Media' or not isinstance(node.parent, ClassDef):
return False
parents = ('django.contrib.admin.options.ModelAdmin',
'django.forms.widgets.Media',
'django.db.models.base.Model',
'.Model', # for the transformed version used in this plugin
'django.forms.forms.Form',
'.Form',
'django.forms.widgets.Widget',
'.Widget',
'django.forms.models.ModelForm',
'.ModelForm')
return node_is_subclass(node.parent, *parents)
def is_model_meta_subclass(node):
"""Checks that node is derivative of Meta class."""
if node.name != 'Meta' or not isinstance(node.parent, ClassDef):
return False
parents = ('.Model', # for the transformed version used here
'django.db.models.base.Model',
'.Form',
'django.forms.forms.Form',
'.ModelForm',
'django.forms.models.ModelForm',
'rest_framework.serializers.BaseSerializer',
'rest_framework.generics.GenericAPIView',
'rest_framework.viewsets.ReadOnlyModelViewSet',
'rest_framework.viewsets.ModelViewSet',
'django_filters.filterset.FilterSet',
'factory.django.DjangoModelFactory',)
return node_is_subclass(node.parent, *parents)
def is_model_factory(node):
"""Checks that node is derivative of DjangoModelFactory or SubFactory class."""
try:
parent_classes = node.expr.inferred()
except: # noqa: E722, pylint: disable=bare-except
return False
parents = ('factory.declarations.LazyFunction',
'factory.declarations.SubFactory',
'factory.django.DjangoModelFactory')
for parent_class in parent_classes:
try:
if parent_class.qname() in parents:
return True
if node_is_subclass(parent_class, *parents):
return True
except AttributeError:
continue
return False
def is_factory_post_generation_method(node):
if not node.decorators:
return False
for decorator in node.decorators.get_children():
try:
inferred = decorator.inferred()
except InferenceError:
continue
for target in inferred:
if target.qname() == 'factory.helpers.post_generation':
return True
return False
def is_model_mpttmeta_subclass(node):
"""Checks that node is derivative of MPTTMeta class."""
if node.name != 'MPTTMeta' or not isinstance(node.parent, ClassDef):
return False
parents = ('django.db.models.base.Model',
'.Model', # for the transformed version used in this plugin
'django.forms.forms.Form',
'.Form',
'django.forms.models.ModelForm',
'.ModelForm')
return node_is_subclass(node.parent, *parents)
def is_style_attribute(node):
parents = ('django.core.management.color.Style', )
return _attribute_is_magic(node, STYLE_ATTRS, parents)
def is_manager_attribute(node):
"""Checks that node is attribute of Manager or QuerySet class."""
parents = ('django.db.models.manager.Manager',
'.Manager',
'factory.base.BaseFactory.build',
'django.db.models.query.QuerySet',
'.QuerySet')
return _attribute_is_magic(node, MANAGER_ATTRS.union(QS_ATTRS), parents)
def is_admin_attribute(node):
"""Checks that node is attribute of BaseModelAdmin."""
parents = ('django.contrib.admin.options.BaseModelAdmin',
'.BaseModelAdmin')
return _attribute_is_magic(node, MODELADMIN_ATTRS, parents)
def is_model_attribute(node):
"""Checks that node is attribute of Model."""
parents = ('django.db.models.base.Model',
'.Model')
return _attribute_is_magic(node, MODEL_ATTRS, parents)
def is_field_attribute(node):
"""Checks that node is attribute of Field."""
parents = ('django.db.models.fields.Field',
'.Field')
return _attribute_is_magic(node, FIELD_ATTRS, parents)
def is_charfield_attribute(node):
"""Checks that node is attribute of CharField."""
parents = ('django.db.models.fields.CharField',
'.CharField')
return _attribute_is_magic(node, CHAR_FIELD_ATTRS, parents)
def is_datefield_attribute(node):
"""Checks that node is attribute of DateField."""
parents = ('django.db.models.fields.DateField',
'.DateField')
return _attribute_is_magic(node, DATE_FIELD_ATTRS, parents)
def is_decimalfield_attribute(node):
"""Checks that node is attribute of DecimalField."""
parents = ('django.db.models.fields.DecimalField',
'.DecimalField')
return _attribute_is_magic(node, DECIMAL_FIELD_ATTRS, parents)
def is_filefield_attribute(node):
"""Checks that node is attribute of FileField."""
parents = ('django.db.models.fields.files.FileField',
'.FileField')
return _attribute_is_magic(node, FILE_FIELD_ATTRS, parents)
def is_imagefield_attribute(node):
"""Checks that node is attribute of ImageField."""
parents = ('django.db.models.fields.files.ImageField',
'.ImageField')
return _attribute_is_magic(node, IMAGE_FIELD_ATTRS, parents)
def is_ipfield_attribute(node):
"""Checks that node is attribute of GenericIPAddressField."""
parents = ('django.db.models.fields.GenericIPAddressField',
'.GenericIPAddressField')
return _attribute_is_magic(node, IP_FIELD_ATTRS, parents)
def is_slugfield_attribute(node):
"""Checks that node is attribute of SlugField."""
parents = ('django.db.models.fields.SlugField',
'.SlugField')
return _attribute_is_magic(node, SLUG_FIELD_ATTRS, parents)
def is_foreignkeyfield_attribute(node):
"""Checks that node is attribute of ForeignKey."""
parents = ('django.db.models.fields.related.ForeignKey',
'.ForeignKey')
return _attribute_is_magic(node, FOREIGNKEY_FIELD_ATTRS, parents)
def is_manytomanyfield_attribute(node):
"""Checks that node is attribute of ManyToManyField."""
parents = ('django.db.models.fields.related.ManyToManyField',
'.ManyToManyField')
return _attribute_is_magic(node, MANYTOMANY_FIELD_ATTRS, parents)
def is_onetoonefield_attribute(node):
"""Checks that node is attribute of OneToOneField."""
parents = ('django.db.models.fields.related.OneToOneField',
'.OneToOneField')
return _attribute_is_magic(node, ONETOONE_FIELD_ATTRS, parents)
def is_form_attribute(node):
"""Checks that node is attribute of Form."""
parents = ('django.forms.forms.Form', 'django.forms.models.ModelForm')
return _attribute_is_magic(node, FORM_ATTRS, parents)
def is_model_test_case_subclass(node):
"""Checks that node is derivative of TestCase class."""
if not node.name.endswith('Test') and not isinstance(node.parent, ClassDef):
return False
return node_is_subclass(node, 'django.test.testcases.TestCase')
def generic_is_view_attribute(parents, attrs):
"""Generates is_X_attribute function for given parents and attrs."""
def is_attribute(node):
return _attribute_is_magic(node, attrs, parents)
return is_attribute
def is_model_view_subclass_method_shouldnt_be_function(node):
"""Checks that node is get or post method of the View class."""
if node.name not in ('get', 'post'):
return False
parent = node.parent
while parent and not isinstance(parent, ScopedClass):
parent = parent.parent
subclass = ('django.views.View',
'django.views.generic.View',
'django.views.generic.base.View',)
return parent is not None and node_is_subclass(parent, *subclass)
def is_model_view_subclass_unused_argument(node):
"""
Checks that node is get or post method of the View class and it has valid arguments.
TODO: Bad checkings, need to be more smart.
"""
if not is_model_view_subclass_method_shouldnt_be_function(node):
return False
return is_argument_named_request(node)
def is_argument_named_request(node):
"""
If an unused-argument is named 'request' ignore that!
"""
return 'request' in node.argnames()
def is_model_field_display_method(node):
"""Accept model's fields with get_*_display names."""
if not node.attrname.endswith('_display'):
return False
if not node.attrname.startswith('get_'):
return False
if node.last_child():
# TODO: could validate the names of the fields on the model rather than
# blindly accepting get_*_display
try:
for cls in node.last_child().inferred():
if node_is_subclass(cls, 'django.db.models.base.Model', '.Model'):
return True
except InferenceError:
return False
return False
def is_model_media_valid_attributes(node):
"""Suppress warnings for valid attributes of Media class."""
if node.name not in ('js', ):
return False
parent = node.parent
while parent and not isinstance(parent, ScopedClass):
parent = parent.parent
if parent is None or parent.name != "Media":
return False
return True
def is_templatetags_module_valid_constant(node):
"""Suppress warnings for valid constants in templatetags module."""
if node.name not in ('register', ):
return False
parent = node.parent
while not isinstance(parent, Module):
parent = parent.parent
if "templatetags." not in parent.name:
return False
return True
def is_urls_module_valid_constant(node):
"""Suppress warnings for valid constants in urls module."""
if node.name not in ('urlpatterns', 'app_name'):
return False
parent = node.parent
while not isinstance(parent, Module):
parent = parent.parent
if not parent.name.endswith('urls'):
return False
return True
def allow_meta_protected_access(node):
if django_version >= (1, 8):
return node.attrname == '_meta'
return False
def is_class(class_name):
"""Shortcut for node_is_subclass."""
return lambda node: node_is_subclass(node, class_name)
def wrap(orig_method, with_method):
def wrap_func(*args, **kwargs):
with_method(orig_method, *args, **kwargs)
return wrap_func
def is_wsgi_application(node):
frame = node.frame()
return node.name == 'application' and isinstance(frame, Module) and \
(frame.name == 'wsgi' or frame.path[0].endswith('wsgi.py') or frame.file.endswith('wsgi.py'))
# Compat helpers
def pylint_newstyle_classdef_compat(linter, warning_name, augment):
if not hasattr(NewStyleConflictChecker, 'visit_classdef'):
return
suppress_message(linter, getattr(NewStyleConflictChecker, 'visit_classdef'), warning_name, augment)
# augment things
def apply_augmentations(linter):
"""Apply augmentation and suppression rules."""
augment_visit(linter, TypeChecker.visit_attribute, foreign_key_sets)
augment_visit(linter, TypeChecker.visit_attribute, foreign_key_ids)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_field_display_method)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_style_attribute)
suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_urls_module_valid_constant)
# supress errors when accessing magical class attributes
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manager_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_admin_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_field_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_charfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_datefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_decimalfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_filefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_imagefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_ipfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_slugfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_foreignkeyfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manytomanyfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_onetoonefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_form_attribute)
for parents, attrs in VIEW_ATTRS:
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', generic_is_view_attribute(parents, attrs))
# formviews have too many ancestors, there's nothing the user of the library can do about that
suppress_message(linter, MisdesignChecker.visit_classdef, 'too-many-ancestors',
is_class('django.views.generic.edit.FormView'))
# model forms have no __init__ method anywhere in their bases
suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_class('django.forms.models.ModelForm'))
# Meta
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_meta_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_meta_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_meta_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_meta_subclass)
suppress_message(linter, ClassChecker.visit_attribute, 'protected-access', allow_meta_protected_access)
# Media
suppress_message(linter, NameChecker.visit_assignname, 'C0103', is_model_media_valid_attributes)
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_media_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_media_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_media_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_media_subclass)
# Admin
# Too many public methods (40+/20)
# TODO: Count public methods of django.contrib.admin.options.ModelAdmin and increase
# MisdesignChecker.config.max_public_methods to this value to count only user' methods.
# nb_public_methods = 0
# for method in node.methods():
# if not method.name.startswith('_'):
# nb_public_methods += 1
suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_admin_subclass)
# Tests
suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_test_case_subclass)
# View
# Method could be a function (get, post)
suppress_message(linter, ClassChecker.leave_functiondef, 'no-self-use',
is_model_view_subclass_method_shouldnt_be_function)
# Unused argument 'request' (get, post)
suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument',
is_model_view_subclass_unused_argument)
suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument', is_argument_named_request)
# django-mptt
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_mpttmeta_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_mpttmeta_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_model_mpttmeta_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_mpttmeta_subclass)
# factory_boy's DjangoModelFactory
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_factory)
suppress_message(linter, ClassChecker.visit_functiondef, 'no-self-argument', is_factory_post_generation_method)
# ForeignKey and OneToOneField
# Must update this in a thread safe way to support the parallel option on pylint (-j)
current_leave_module = VariablesChecker.leave_module
if current_leave_module.__name__ == 'leave_module':
# current_leave_module is not wrapped
# Two threads may hit the next assignment concurrently, but the result is the same
VariablesChecker.leave_module = wrap(current_leave_module, ignore_import_warnings_for_related_fields)
# VariablesChecker.leave_module is now wrapped
# else VariablesChecker.leave_module is already wrapped
# wsgi.py
suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_wsgi_application)
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
generic_is_view_attribute
|
python
|
def generic_is_view_attribute(parents, attrs):
def is_attribute(node):
return _attribute_is_magic(node, attrs, parents)
return is_attribute
|
Generates is_X_attribute function for given parents and attrs.
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L621-L625
| null |
"""Augmentations."""
# pylint: disable=invalid-name
import itertools
from astroid import InferenceError
from astroid.objects import Super
from astroid.nodes import ClassDef, ImportFrom, Attribute
from astroid.scoped_nodes import ClassDef as ScopedClass, Module
from pylint.checkers.base import DocStringChecker, NameChecker
from pylint.checkers.design_analysis import MisdesignChecker
from pylint.checkers.classes import ClassChecker
from pylint.checkers.newstyle import NewStyleConflictChecker
from pylint.checkers.variables import VariablesChecker
from pylint.checkers.typecheck import TypeChecker
from pylint.checkers.variables import ScopeConsumer
from pylint_plugin_utils import augment_visit, suppress_message
from django import VERSION as django_version
from django.views.generic.base import View, RedirectView, ContextMixin
from django.views.generic.dates import DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin
from django.views.generic.detail import SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin
from django.views.generic.edit import DeletionMixin, FormMixin, ModelFormMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
from django.utils import termcolors
from pylint_django.utils import node_is_subclass, PY3
# Note: it would have been nice to import the Manager object from Django and
# get its attributes that way - and this used to be the method - but unfortunately
# there's no guarantee that Django is properly configured at that stage, and importing
# anything from the django.db package causes an ImproperlyConfigured exception.
# Therefore we'll fall back on a hard-coded list of attributes which won't be as accurate,
# but this is not 100% accurate anyway.
MANAGER_ATTRS = {
'none',
'all',
'count',
'dates',
'distinct',
'extra',
'get',
'get_or_create',
'get_queryset',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'latest',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
}
QS_ATTRS = {
'filter',
'exclude',
'annotate',
'order_by',
'reverse',
'distinct',
'values',
'values_list',
'dates',
'datetimes',
'none',
'all',
'select_related',
'prefetch_related',
'extra',
'defer',
'only',
'using',
'select_for_update',
'raw',
'get',
'create',
'get_or_create',
'update_or_create',
'bulk_create',
'count',
'in_bulk',
'iterator',
'latest',
'earliest',
'first',
'last',
'aggregate',
'exists',
'update',
'delete',
'as_manager',
'expression',
'output_field',
}
MODELADMIN_ATTRS = {
# options
'actions',
'actions_on_top',
'actions_on_bottom',
'actions_selection_counter',
'date_hierarchy',
'empty_value_display',
'exclude',
'fields',
'fieldsets',
'filter_horizontal',
'filter_vertical',
'form',
'formfield_overrides',
'inlines',
'list_display',
'list_display_links',
'list_editable',
'list_filter',
'list_max_show_all',
'list_per_page',
'list_select_related',
'ordering',
'paginator',
'prepopulated_fields',
'preserve_filters',
'radio_fields',
'raw_id_fields',
'readonly_fields',
'save_as',
'save_on_top',
'search_fields',
'show_full_result_count',
'view_on_site',
# template options
'add_form_template',
'change_form_template',
'change_list_template',
'delete_confirmation_template',
'delete_selected_confirmation_template',
'object_history_template',
}
MODEL_ATTRS = {
'id',
'DoesNotExist',
'MultipleObjectsReturned',
'_base_manager',
'_default_manager',
'_meta',
'delete',
'get_next_by_date',
'get_previous_by_date',
'objects',
'save',
}
FIELD_ATTRS = {
'null',
'blank',
'choices',
'db_column',
'db_index',
'db_tablespace',
'default',
'editable',
'error_messages',
'help_text',
'primary_key',
'unique',
'unique_for_date',
'unique_for_month',
'unique_for_year',
'verbose_name',
'validators',
}
CHAR_FIELD_ATTRS = {
'max_length',
}
DATE_FIELD_ATTRS = {
'auto_now',
'auto_now_add',
}
DECIMAL_FIELD_ATTRS = {
'max_digits',
'decimal_places',
}
FILE_FIELD_ATTRS = {
'upload_to',
'storage',
}
IMAGE_FIELD_ATTRS = {
'height_field',
'width_field',
}
IP_FIELD_ATTRS = {
'protocol',
'unpack_ipv4',
}
SLUG_FIELD_ATTRS = {
'allow_unicode',
}
FOREIGNKEY_FIELD_ATTRS = {
'limit_choices_to',
'related_name',
'related_query_name',
'to_field',
'db_constraint',
'swappable',
}
MANYTOMANY_FIELD_ATTRS = {
'add',
'clear',
'related_name',
'related_query_name',
'limit_choices_to',
'symmetrical',
'through',
'through_fields',
'db_table',
'db_constraint',
'swappable',
}
ONETOONE_FIELD_ATTRS = {
'parent_link',
}
STYLE_ATTRS = set(itertools.chain.from_iterable(termcolors.PALETTES.values()))
VIEW_ATTRS = {
(
(
'{}.{}'.format(cls.__module__, cls.__name__),
'.{}'.format(cls.__name__)
),
tuple(cls.__dict__.keys())
) for cls in (
View, RedirectView, ContextMixin,
DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin,
SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin,
DeletionMixin, FormMixin, ModelFormMixin,
MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)
}
FORM_ATTRS = {
'declared_fields',
}
def ignore_import_warnings_for_related_fields(orig_method, self, node):
"""
Replaces the leave_module method on the VariablesChecker class to
prevent unused-import warnings which are caused by the ForeignKey
and OneToOneField transformations. By replacing the nodes in the
AST with their type rather than the django field, imports of the
form 'from django.db.models import OneToOneField' raise an unused-import
warning
"""
consumer = self._to_consume[0] # pylint: disable=W0212
# we can disable this warning ('Access to a protected member _to_consume of a client class')
# as it's not actually a client class, but rather, this method is being monkey patched
# onto the class and so the access is valid
new_things = {}
iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems
for name, stmts in iterat():
if isinstance(stmts[0], ImportFrom):
if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
continue
new_things[name] = stmts
consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type) # pylint: disable=W0212
self._to_consume = [consumer] # pylint: disable=W0212
return orig_method(self, node)
def foreign_key_sets(chain, node):
"""
When a Django model has a ForeignKey to another model, the target
of the foreign key gets a '<modelname>_set' attribute for accessing
a queryset of the model owning the foreign key - eg:
class ModelA(models.Model):
pass
class ModelB(models.Model):
a = models.ForeignKey(ModelA)
Now, ModelA instances will have a modelb_set attribute.
It's also possible to explicitly name the relationship using the related_name argument
to the ForeignKey constructor. As it's impossible to know this without inspecting all
models before processing, we'll instead do a "best guess" approach and see if the attribute
being accessed goes on to be used as a queryset. This is via 'duck typing': if the method
called on the attribute being accessed is something we might find in a queryset, we'll
warn.
"""
quack = False
if node.attrname in MANAGER_ATTRS or node.attrname.endswith('_set'):
# if this is a X_set method, that's a pretty strong signal that this is the default
# Django name, rather than one set by related_name
quack = True
else:
# we will
if isinstance(node.parent, Attribute):
func_name = getattr(node.parent, 'attrname', None)
if func_name in MANAGER_ATTRS:
quack = True
if quack:
children = list(node.get_children())
for child in children:
try:
inferred_cls = child.inferred()
except InferenceError:
pass
else:
for cls in inferred_cls:
if (node_is_subclass(cls,
'django.db.models.manager.Manager',
'django.db.models.base.Model',
'.Model',
'django.db.models.fields.related.ForeignObject')):
# This means that we are looking at a subclass of models.Model
# and something is trying to access a <something>_set attribute.
# Since this could exist, we will return so as not to raise an
# error.
return
chain()
def foreign_key_ids(chain, node):
if node.attrname.endswith('_id'):
return
chain()
def is_model_admin_subclass(node):
"""Checks that node is derivative of ModelAdmin class."""
if node.name[-5:] != 'Admin' or isinstance(node.parent, ClassDef):
return False
return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin')
def is_model_media_subclass(node):
"""Checks that node is derivative of Media class."""
if node.name != 'Media' or not isinstance(node.parent, ClassDef):
return False
parents = ('django.contrib.admin.options.ModelAdmin',
'django.forms.widgets.Media',
'django.db.models.base.Model',
'.Model', # for the transformed version used in this plugin
'django.forms.forms.Form',
'.Form',
'django.forms.widgets.Widget',
'.Widget',
'django.forms.models.ModelForm',
'.ModelForm')
return node_is_subclass(node.parent, *parents)
def is_model_meta_subclass(node):
"""Checks that node is derivative of Meta class."""
if node.name != 'Meta' or not isinstance(node.parent, ClassDef):
return False
parents = ('.Model', # for the transformed version used here
'django.db.models.base.Model',
'.Form',
'django.forms.forms.Form',
'.ModelForm',
'django.forms.models.ModelForm',
'rest_framework.serializers.BaseSerializer',
'rest_framework.generics.GenericAPIView',
'rest_framework.viewsets.ReadOnlyModelViewSet',
'rest_framework.viewsets.ModelViewSet',
'django_filters.filterset.FilterSet',
'factory.django.DjangoModelFactory',)
return node_is_subclass(node.parent, *parents)
def is_model_factory(node):
"""Checks that node is derivative of DjangoModelFactory or SubFactory class."""
try:
parent_classes = node.expr.inferred()
except: # noqa: E722, pylint: disable=bare-except
return False
parents = ('factory.declarations.LazyFunction',
'factory.declarations.SubFactory',
'factory.django.DjangoModelFactory')
for parent_class in parent_classes:
try:
if parent_class.qname() in parents:
return True
if node_is_subclass(parent_class, *parents):
return True
except AttributeError:
continue
return False
def is_factory_post_generation_method(node):
if not node.decorators:
return False
for decorator in node.decorators.get_children():
try:
inferred = decorator.inferred()
except InferenceError:
continue
for target in inferred:
if target.qname() == 'factory.helpers.post_generation':
return True
return False
def is_model_mpttmeta_subclass(node):
"""Checks that node is derivative of MPTTMeta class."""
if node.name != 'MPTTMeta' or not isinstance(node.parent, ClassDef):
return False
parents = ('django.db.models.base.Model',
'.Model', # for the transformed version used in this plugin
'django.forms.forms.Form',
'.Form',
'django.forms.models.ModelForm',
'.ModelForm')
return node_is_subclass(node.parent, *parents)
def _attribute_is_magic(node, attrs, parents):
"""Checks that node is an attribute used inside one of allowed parents"""
if node.attrname not in attrs:
return False
if not node.last_child():
return False
try:
for cls in node.last_child().inferred():
if isinstance(cls, Super):
cls = cls._self_class # pylint: disable=protected-access
if node_is_subclass(cls, *parents) or cls.qname() in parents:
return True
except InferenceError:
pass
return False
def is_style_attribute(node):
parents = ('django.core.management.color.Style', )
return _attribute_is_magic(node, STYLE_ATTRS, parents)
def is_manager_attribute(node):
"""Checks that node is attribute of Manager or QuerySet class."""
parents = ('django.db.models.manager.Manager',
'.Manager',
'factory.base.BaseFactory.build',
'django.db.models.query.QuerySet',
'.QuerySet')
return _attribute_is_magic(node, MANAGER_ATTRS.union(QS_ATTRS), parents)
def is_admin_attribute(node):
"""Checks that node is attribute of BaseModelAdmin."""
parents = ('django.contrib.admin.options.BaseModelAdmin',
'.BaseModelAdmin')
return _attribute_is_magic(node, MODELADMIN_ATTRS, parents)
def is_model_attribute(node):
"""Checks that node is attribute of Model."""
parents = ('django.db.models.base.Model',
'.Model')
return _attribute_is_magic(node, MODEL_ATTRS, parents)
def is_field_attribute(node):
"""Checks that node is attribute of Field."""
parents = ('django.db.models.fields.Field',
'.Field')
return _attribute_is_magic(node, FIELD_ATTRS, parents)
def is_charfield_attribute(node):
"""Checks that node is attribute of CharField."""
parents = ('django.db.models.fields.CharField',
'.CharField')
return _attribute_is_magic(node, CHAR_FIELD_ATTRS, parents)
def is_datefield_attribute(node):
"""Checks that node is attribute of DateField."""
parents = ('django.db.models.fields.DateField',
'.DateField')
return _attribute_is_magic(node, DATE_FIELD_ATTRS, parents)
def is_decimalfield_attribute(node):
"""Checks that node is attribute of DecimalField."""
parents = ('django.db.models.fields.DecimalField',
'.DecimalField')
return _attribute_is_magic(node, DECIMAL_FIELD_ATTRS, parents)
def is_filefield_attribute(node):
"""Checks that node is attribute of FileField."""
parents = ('django.db.models.fields.files.FileField',
'.FileField')
return _attribute_is_magic(node, FILE_FIELD_ATTRS, parents)
def is_imagefield_attribute(node):
"""Checks that node is attribute of ImageField."""
parents = ('django.db.models.fields.files.ImageField',
'.ImageField')
return _attribute_is_magic(node, IMAGE_FIELD_ATTRS, parents)
def is_ipfield_attribute(node):
"""Checks that node is attribute of GenericIPAddressField."""
parents = ('django.db.models.fields.GenericIPAddressField',
'.GenericIPAddressField')
return _attribute_is_magic(node, IP_FIELD_ATTRS, parents)
def is_slugfield_attribute(node):
"""Checks that node is attribute of SlugField."""
parents = ('django.db.models.fields.SlugField',
'.SlugField')
return _attribute_is_magic(node, SLUG_FIELD_ATTRS, parents)
def is_foreignkeyfield_attribute(node):
"""Checks that node is attribute of ForeignKey."""
parents = ('django.db.models.fields.related.ForeignKey',
'.ForeignKey')
return _attribute_is_magic(node, FOREIGNKEY_FIELD_ATTRS, parents)
def is_manytomanyfield_attribute(node):
"""Checks that node is attribute of ManyToManyField."""
parents = ('django.db.models.fields.related.ManyToManyField',
'.ManyToManyField')
return _attribute_is_magic(node, MANYTOMANY_FIELD_ATTRS, parents)
def is_onetoonefield_attribute(node):
"""Checks that node is attribute of OneToOneField."""
parents = ('django.db.models.fields.related.OneToOneField',
'.OneToOneField')
return _attribute_is_magic(node, ONETOONE_FIELD_ATTRS, parents)
def is_form_attribute(node):
"""Checks that node is attribute of Form."""
parents = ('django.forms.forms.Form', 'django.forms.models.ModelForm')
return _attribute_is_magic(node, FORM_ATTRS, parents)
def is_model_test_case_subclass(node):
"""Checks that node is derivative of TestCase class."""
if not node.name.endswith('Test') and not isinstance(node.parent, ClassDef):
return False
return node_is_subclass(node, 'django.test.testcases.TestCase')
def is_model_view_subclass_method_shouldnt_be_function(node):
"""Checks that node is get or post method of the View class."""
if node.name not in ('get', 'post'):
return False
parent = node.parent
while parent and not isinstance(parent, ScopedClass):
parent = parent.parent
subclass = ('django.views.View',
'django.views.generic.View',
'django.views.generic.base.View',)
return parent is not None and node_is_subclass(parent, *subclass)
def is_model_view_subclass_unused_argument(node):
"""
Checks that node is get or post method of the View class and it has valid arguments.
TODO: Bad checkings, need to be more smart.
"""
if not is_model_view_subclass_method_shouldnt_be_function(node):
return False
return is_argument_named_request(node)
def is_argument_named_request(node):
"""
If an unused-argument is named 'request' ignore that!
"""
return 'request' in node.argnames()
def is_model_field_display_method(node):
"""Accept model's fields with get_*_display names."""
if not node.attrname.endswith('_display'):
return False
if not node.attrname.startswith('get_'):
return False
if node.last_child():
# TODO: could validate the names of the fields on the model rather than
# blindly accepting get_*_display
try:
for cls in node.last_child().inferred():
if node_is_subclass(cls, 'django.db.models.base.Model', '.Model'):
return True
except InferenceError:
return False
return False
def is_model_media_valid_attributes(node):
"""Suppress warnings for valid attributes of Media class."""
if node.name not in ('js', ):
return False
parent = node.parent
while parent and not isinstance(parent, ScopedClass):
parent = parent.parent
if parent is None or parent.name != "Media":
return False
return True
def is_templatetags_module_valid_constant(node):
"""Suppress warnings for valid constants in templatetags module."""
if node.name not in ('register', ):
return False
parent = node.parent
while not isinstance(parent, Module):
parent = parent.parent
if "templatetags." not in parent.name:
return False
return True
def is_urls_module_valid_constant(node):
"""Suppress warnings for valid constants in urls module."""
if node.name not in ('urlpatterns', 'app_name'):
return False
parent = node.parent
while not isinstance(parent, Module):
parent = parent.parent
if not parent.name.endswith('urls'):
return False
return True
def allow_meta_protected_access(node):
if django_version >= (1, 8):
return node.attrname == '_meta'
return False
def is_class(class_name):
"""Shortcut for node_is_subclass."""
return lambda node: node_is_subclass(node, class_name)
def wrap(orig_method, with_method):
def wrap_func(*args, **kwargs):
with_method(orig_method, *args, **kwargs)
return wrap_func
def is_wsgi_application(node):
frame = node.frame()
return node.name == 'application' and isinstance(frame, Module) and \
(frame.name == 'wsgi' or frame.path[0].endswith('wsgi.py') or frame.file.endswith('wsgi.py'))
# Compat helpers
def pylint_newstyle_classdef_compat(linter, warning_name, augment):
if not hasattr(NewStyleConflictChecker, 'visit_classdef'):
return
suppress_message(linter, getattr(NewStyleConflictChecker, 'visit_classdef'), warning_name, augment)
# augment things
def apply_augmentations(linter):
"""Apply augmentation and suppression rules."""
augment_visit(linter, TypeChecker.visit_attribute, foreign_key_sets)
augment_visit(linter, TypeChecker.visit_attribute, foreign_key_ids)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_field_display_method)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_style_attribute)
suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_urls_module_valid_constant)
# supress errors when accessing magical class attributes
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manager_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_admin_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_field_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_charfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_datefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_decimalfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_filefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_imagefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_ipfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_slugfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_foreignkeyfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manytomanyfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_onetoonefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_form_attribute)
for parents, attrs in VIEW_ATTRS:
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', generic_is_view_attribute(parents, attrs))
# formviews have too many ancestors, there's nothing the user of the library can do about that
suppress_message(linter, MisdesignChecker.visit_classdef, 'too-many-ancestors',
is_class('django.views.generic.edit.FormView'))
# model forms have no __init__ method anywhere in their bases
suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_class('django.forms.models.ModelForm'))
# Meta
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_meta_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_meta_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_meta_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_meta_subclass)
suppress_message(linter, ClassChecker.visit_attribute, 'protected-access', allow_meta_protected_access)
# Media
suppress_message(linter, NameChecker.visit_assignname, 'C0103', is_model_media_valid_attributes)
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_media_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_media_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_media_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_media_subclass)
# Admin
# Too many public methods (40+/20)
# TODO: Count public methods of django.contrib.admin.options.ModelAdmin and increase
# MisdesignChecker.config.max_public_methods to this value to count only user' methods.
# nb_public_methods = 0
# for method in node.methods():
# if not method.name.startswith('_'):
# nb_public_methods += 1
suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_admin_subclass)
# Tests
suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_test_case_subclass)
# View
# Method could be a function (get, post)
suppress_message(linter, ClassChecker.leave_functiondef, 'no-self-use',
is_model_view_subclass_method_shouldnt_be_function)
# Unused argument 'request' (get, post)
suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument',
is_model_view_subclass_unused_argument)
suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument', is_argument_named_request)
# django-mptt
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_mpttmeta_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_mpttmeta_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_model_mpttmeta_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_mpttmeta_subclass)
# factory_boy's DjangoModelFactory
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_factory)
suppress_message(linter, ClassChecker.visit_functiondef, 'no-self-argument', is_factory_post_generation_method)
# ForeignKey and OneToOneField
# Must update this in a thread safe way to support the parallel option on pylint (-j)
current_leave_module = VariablesChecker.leave_module
if current_leave_module.__name__ == 'leave_module':
# current_leave_module is not wrapped
# Two threads may hit the next assignment concurrently, but the result is the same
VariablesChecker.leave_module = wrap(current_leave_module, ignore_import_warnings_for_related_fields)
# VariablesChecker.leave_module is now wrapped
# else VariablesChecker.leave_module is already wrapped
# wsgi.py
suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_wsgi_application)
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
is_model_view_subclass_method_shouldnt_be_function
|
python
|
def is_model_view_subclass_method_shouldnt_be_function(node):
if node.name not in ('get', 'post'):
return False
parent = node.parent
while parent and not isinstance(parent, ScopedClass):
parent = parent.parent
subclass = ('django.views.View',
'django.views.generic.View',
'django.views.generic.base.View',)
return parent is not None and node_is_subclass(parent, *subclass)
|
Checks that node is get or post method of the View class.
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L628-L641
|
[
"def node_is_subclass(cls, *subclass_names):\n \"\"\"Checks if cls node has parent with subclass_name.\"\"\"\n if not isinstance(cls, (ClassDef, Instance)):\n return False\n\n if cls.bases == Uninferable:\n return False\n for base_cls in cls.bases:\n try:\n for inf in base_cls.inferred():\n if inf.qname() in subclass_names:\n return True\n if inf != cls and node_is_subclass(inf, *subclass_names):\n # check up the hierarchy in case we are a subclass of\n # a subclass of a subclass ...\n return True\n except InferenceError:\n continue\n\n return False\n"
] |
"""Augmentations."""
# pylint: disable=invalid-name
import itertools
from astroid import InferenceError
from astroid.objects import Super
from astroid.nodes import ClassDef, ImportFrom, Attribute
from astroid.scoped_nodes import ClassDef as ScopedClass, Module
from pylint.checkers.base import DocStringChecker, NameChecker
from pylint.checkers.design_analysis import MisdesignChecker
from pylint.checkers.classes import ClassChecker
from pylint.checkers.newstyle import NewStyleConflictChecker
from pylint.checkers.variables import VariablesChecker
from pylint.checkers.typecheck import TypeChecker
from pylint.checkers.variables import ScopeConsumer
from pylint_plugin_utils import augment_visit, suppress_message
from django import VERSION as django_version
from django.views.generic.base import View, RedirectView, ContextMixin
from django.views.generic.dates import DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin
from django.views.generic.detail import SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin
from django.views.generic.edit import DeletionMixin, FormMixin, ModelFormMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
from django.utils import termcolors
from pylint_django.utils import node_is_subclass, PY3
# Note: it would have been nice to import the Manager object from Django and
# get its attributes that way - and this used to be the method - but unfortunately
# there's no guarantee that Django is properly configured at that stage, and importing
# anything from the django.db package causes an ImproperlyConfigured exception.
# Therefore we'll fall back on a hard-coded list of attributes which won't be as accurate,
# but this is not 100% accurate anyway.
MANAGER_ATTRS = {
'none',
'all',
'count',
'dates',
'distinct',
'extra',
'get',
'get_or_create',
'get_queryset',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'latest',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
}
QS_ATTRS = {
'filter',
'exclude',
'annotate',
'order_by',
'reverse',
'distinct',
'values',
'values_list',
'dates',
'datetimes',
'none',
'all',
'select_related',
'prefetch_related',
'extra',
'defer',
'only',
'using',
'select_for_update',
'raw',
'get',
'create',
'get_or_create',
'update_or_create',
'bulk_create',
'count',
'in_bulk',
'iterator',
'latest',
'earliest',
'first',
'last',
'aggregate',
'exists',
'update',
'delete',
'as_manager',
'expression',
'output_field',
}
MODELADMIN_ATTRS = {
# options
'actions',
'actions_on_top',
'actions_on_bottom',
'actions_selection_counter',
'date_hierarchy',
'empty_value_display',
'exclude',
'fields',
'fieldsets',
'filter_horizontal',
'filter_vertical',
'form',
'formfield_overrides',
'inlines',
'list_display',
'list_display_links',
'list_editable',
'list_filter',
'list_max_show_all',
'list_per_page',
'list_select_related',
'ordering',
'paginator',
'prepopulated_fields',
'preserve_filters',
'radio_fields',
'raw_id_fields',
'readonly_fields',
'save_as',
'save_on_top',
'search_fields',
'show_full_result_count',
'view_on_site',
# template options
'add_form_template',
'change_form_template',
'change_list_template',
'delete_confirmation_template',
'delete_selected_confirmation_template',
'object_history_template',
}
MODEL_ATTRS = {
'id',
'DoesNotExist',
'MultipleObjectsReturned',
'_base_manager',
'_default_manager',
'_meta',
'delete',
'get_next_by_date',
'get_previous_by_date',
'objects',
'save',
}
FIELD_ATTRS = {
'null',
'blank',
'choices',
'db_column',
'db_index',
'db_tablespace',
'default',
'editable',
'error_messages',
'help_text',
'primary_key',
'unique',
'unique_for_date',
'unique_for_month',
'unique_for_year',
'verbose_name',
'validators',
}
CHAR_FIELD_ATTRS = {
'max_length',
}
DATE_FIELD_ATTRS = {
'auto_now',
'auto_now_add',
}
DECIMAL_FIELD_ATTRS = {
'max_digits',
'decimal_places',
}
FILE_FIELD_ATTRS = {
'upload_to',
'storage',
}
IMAGE_FIELD_ATTRS = {
'height_field',
'width_field',
}
IP_FIELD_ATTRS = {
'protocol',
'unpack_ipv4',
}
SLUG_FIELD_ATTRS = {
'allow_unicode',
}
FOREIGNKEY_FIELD_ATTRS = {
'limit_choices_to',
'related_name',
'related_query_name',
'to_field',
'db_constraint',
'swappable',
}
MANYTOMANY_FIELD_ATTRS = {
'add',
'clear',
'related_name',
'related_query_name',
'limit_choices_to',
'symmetrical',
'through',
'through_fields',
'db_table',
'db_constraint',
'swappable',
}
ONETOONE_FIELD_ATTRS = {
'parent_link',
}
STYLE_ATTRS = set(itertools.chain.from_iterable(termcolors.PALETTES.values()))
VIEW_ATTRS = {
(
(
'{}.{}'.format(cls.__module__, cls.__name__),
'.{}'.format(cls.__name__)
),
tuple(cls.__dict__.keys())
) for cls in (
View, RedirectView, ContextMixin,
DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin,
SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin,
DeletionMixin, FormMixin, ModelFormMixin,
MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)
}
FORM_ATTRS = {
'declared_fields',
}
def ignore_import_warnings_for_related_fields(orig_method, self, node):
"""
Replaces the leave_module method on the VariablesChecker class to
prevent unused-import warnings which are caused by the ForeignKey
and OneToOneField transformations. By replacing the nodes in the
AST with their type rather than the django field, imports of the
form 'from django.db.models import OneToOneField' raise an unused-import
warning
"""
consumer = self._to_consume[0] # pylint: disable=W0212
# we can disable this warning ('Access to a protected member _to_consume of a client class')
# as it's not actually a client class, but rather, this method is being monkey patched
# onto the class and so the access is valid
new_things = {}
iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems
for name, stmts in iterat():
if isinstance(stmts[0], ImportFrom):
if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
continue
new_things[name] = stmts
consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type) # pylint: disable=W0212
self._to_consume = [consumer] # pylint: disable=W0212
return orig_method(self, node)
def foreign_key_sets(chain, node):
"""
When a Django model has a ForeignKey to another model, the target
of the foreign key gets a '<modelname>_set' attribute for accessing
a queryset of the model owning the foreign key - eg:
class ModelA(models.Model):
pass
class ModelB(models.Model):
a = models.ForeignKey(ModelA)
Now, ModelA instances will have a modelb_set attribute.
It's also possible to explicitly name the relationship using the related_name argument
to the ForeignKey constructor. As it's impossible to know this without inspecting all
models before processing, we'll instead do a "best guess" approach and see if the attribute
being accessed goes on to be used as a queryset. This is via 'duck typing': if the method
called on the attribute being accessed is something we might find in a queryset, we'll
warn.
"""
quack = False
if node.attrname in MANAGER_ATTRS or node.attrname.endswith('_set'):
# if this is a X_set method, that's a pretty strong signal that this is the default
# Django name, rather than one set by related_name
quack = True
else:
# we will
if isinstance(node.parent, Attribute):
func_name = getattr(node.parent, 'attrname', None)
if func_name in MANAGER_ATTRS:
quack = True
if quack:
children = list(node.get_children())
for child in children:
try:
inferred_cls = child.inferred()
except InferenceError:
pass
else:
for cls in inferred_cls:
if (node_is_subclass(cls,
'django.db.models.manager.Manager',
'django.db.models.base.Model',
'.Model',
'django.db.models.fields.related.ForeignObject')):
# This means that we are looking at a subclass of models.Model
# and something is trying to access a <something>_set attribute.
# Since this could exist, we will return so as not to raise an
# error.
return
chain()
def foreign_key_ids(chain, node):
if node.attrname.endswith('_id'):
return
chain()
def is_model_admin_subclass(node):
"""Checks that node is derivative of ModelAdmin class."""
if node.name[-5:] != 'Admin' or isinstance(node.parent, ClassDef):
return False
return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin')
def is_model_media_subclass(node):
"""Checks that node is derivative of Media class."""
if node.name != 'Media' or not isinstance(node.parent, ClassDef):
return False
parents = ('django.contrib.admin.options.ModelAdmin',
'django.forms.widgets.Media',
'django.db.models.base.Model',
'.Model', # for the transformed version used in this plugin
'django.forms.forms.Form',
'.Form',
'django.forms.widgets.Widget',
'.Widget',
'django.forms.models.ModelForm',
'.ModelForm')
return node_is_subclass(node.parent, *parents)
def is_model_meta_subclass(node):
"""Checks that node is derivative of Meta class."""
if node.name != 'Meta' or not isinstance(node.parent, ClassDef):
return False
parents = ('.Model', # for the transformed version used here
'django.db.models.base.Model',
'.Form',
'django.forms.forms.Form',
'.ModelForm',
'django.forms.models.ModelForm',
'rest_framework.serializers.BaseSerializer',
'rest_framework.generics.GenericAPIView',
'rest_framework.viewsets.ReadOnlyModelViewSet',
'rest_framework.viewsets.ModelViewSet',
'django_filters.filterset.FilterSet',
'factory.django.DjangoModelFactory',)
return node_is_subclass(node.parent, *parents)
def is_model_factory(node):
"""Checks that node is derivative of DjangoModelFactory or SubFactory class."""
try:
parent_classes = node.expr.inferred()
except: # noqa: E722, pylint: disable=bare-except
return False
parents = ('factory.declarations.LazyFunction',
'factory.declarations.SubFactory',
'factory.django.DjangoModelFactory')
for parent_class in parent_classes:
try:
if parent_class.qname() in parents:
return True
if node_is_subclass(parent_class, *parents):
return True
except AttributeError:
continue
return False
def is_factory_post_generation_method(node):
if not node.decorators:
return False
for decorator in node.decorators.get_children():
try:
inferred = decorator.inferred()
except InferenceError:
continue
for target in inferred:
if target.qname() == 'factory.helpers.post_generation':
return True
return False
def is_model_mpttmeta_subclass(node):
"""Checks that node is derivative of MPTTMeta class."""
if node.name != 'MPTTMeta' or not isinstance(node.parent, ClassDef):
return False
parents = ('django.db.models.base.Model',
'.Model', # for the transformed version used in this plugin
'django.forms.forms.Form',
'.Form',
'django.forms.models.ModelForm',
'.ModelForm')
return node_is_subclass(node.parent, *parents)
def _attribute_is_magic(node, attrs, parents):
"""Checks that node is an attribute used inside one of allowed parents"""
if node.attrname not in attrs:
return False
if not node.last_child():
return False
try:
for cls in node.last_child().inferred():
if isinstance(cls, Super):
cls = cls._self_class # pylint: disable=protected-access
if node_is_subclass(cls, *parents) or cls.qname() in parents:
return True
except InferenceError:
pass
return False
def is_style_attribute(node):
parents = ('django.core.management.color.Style', )
return _attribute_is_magic(node, STYLE_ATTRS, parents)
def is_manager_attribute(node):
"""Checks that node is attribute of Manager or QuerySet class."""
parents = ('django.db.models.manager.Manager',
'.Manager',
'factory.base.BaseFactory.build',
'django.db.models.query.QuerySet',
'.QuerySet')
return _attribute_is_magic(node, MANAGER_ATTRS.union(QS_ATTRS), parents)
def is_admin_attribute(node):
"""Checks that node is attribute of BaseModelAdmin."""
parents = ('django.contrib.admin.options.BaseModelAdmin',
'.BaseModelAdmin')
return _attribute_is_magic(node, MODELADMIN_ATTRS, parents)
def is_model_attribute(node):
"""Checks that node is attribute of Model."""
parents = ('django.db.models.base.Model',
'.Model')
return _attribute_is_magic(node, MODEL_ATTRS, parents)
def is_field_attribute(node):
"""Checks that node is attribute of Field."""
parents = ('django.db.models.fields.Field',
'.Field')
return _attribute_is_magic(node, FIELD_ATTRS, parents)
def is_charfield_attribute(node):
"""Checks that node is attribute of CharField."""
parents = ('django.db.models.fields.CharField',
'.CharField')
return _attribute_is_magic(node, CHAR_FIELD_ATTRS, parents)
def is_datefield_attribute(node):
"""Checks that node is attribute of DateField."""
parents = ('django.db.models.fields.DateField',
'.DateField')
return _attribute_is_magic(node, DATE_FIELD_ATTRS, parents)
def is_decimalfield_attribute(node):
"""Checks that node is attribute of DecimalField."""
parents = ('django.db.models.fields.DecimalField',
'.DecimalField')
return _attribute_is_magic(node, DECIMAL_FIELD_ATTRS, parents)
def is_filefield_attribute(node):
"""Checks that node is attribute of FileField."""
parents = ('django.db.models.fields.files.FileField',
'.FileField')
return _attribute_is_magic(node, FILE_FIELD_ATTRS, parents)
def is_imagefield_attribute(node):
"""Checks that node is attribute of ImageField."""
parents = ('django.db.models.fields.files.ImageField',
'.ImageField')
return _attribute_is_magic(node, IMAGE_FIELD_ATTRS, parents)
def is_ipfield_attribute(node):
"""Checks that node is attribute of GenericIPAddressField."""
parents = ('django.db.models.fields.GenericIPAddressField',
'.GenericIPAddressField')
return _attribute_is_magic(node, IP_FIELD_ATTRS, parents)
def is_slugfield_attribute(node):
"""Checks that node is attribute of SlugField."""
parents = ('django.db.models.fields.SlugField',
'.SlugField')
return _attribute_is_magic(node, SLUG_FIELD_ATTRS, parents)
def is_foreignkeyfield_attribute(node):
"""Checks that node is attribute of ForeignKey."""
parents = ('django.db.models.fields.related.ForeignKey',
'.ForeignKey')
return _attribute_is_magic(node, FOREIGNKEY_FIELD_ATTRS, parents)
def is_manytomanyfield_attribute(node):
"""Checks that node is attribute of ManyToManyField."""
parents = ('django.db.models.fields.related.ManyToManyField',
'.ManyToManyField')
return _attribute_is_magic(node, MANYTOMANY_FIELD_ATTRS, parents)
def is_onetoonefield_attribute(node):
"""Checks that node is attribute of OneToOneField."""
parents = ('django.db.models.fields.related.OneToOneField',
'.OneToOneField')
return _attribute_is_magic(node, ONETOONE_FIELD_ATTRS, parents)
def is_form_attribute(node):
"""Checks that node is attribute of Form."""
parents = ('django.forms.forms.Form', 'django.forms.models.ModelForm')
return _attribute_is_magic(node, FORM_ATTRS, parents)
def is_model_test_case_subclass(node):
"""Checks that node is derivative of TestCase class."""
if not node.name.endswith('Test') and not isinstance(node.parent, ClassDef):
return False
return node_is_subclass(node, 'django.test.testcases.TestCase')
def generic_is_view_attribute(parents, attrs):
"""Generates is_X_attribute function for given parents and attrs."""
def is_attribute(node):
return _attribute_is_magic(node, attrs, parents)
return is_attribute
def is_model_view_subclass_unused_argument(node):
"""
Checks that node is get or post method of the View class and it has valid arguments.
TODO: Bad checkings, need to be more smart.
"""
if not is_model_view_subclass_method_shouldnt_be_function(node):
return False
return is_argument_named_request(node)
def is_argument_named_request(node):
"""
If an unused-argument is named 'request' ignore that!
"""
return 'request' in node.argnames()
def is_model_field_display_method(node):
"""Accept model's fields with get_*_display names."""
if not node.attrname.endswith('_display'):
return False
if not node.attrname.startswith('get_'):
return False
if node.last_child():
# TODO: could validate the names of the fields on the model rather than
# blindly accepting get_*_display
try:
for cls in node.last_child().inferred():
if node_is_subclass(cls, 'django.db.models.base.Model', '.Model'):
return True
except InferenceError:
return False
return False
def is_model_media_valid_attributes(node):
"""Suppress warnings for valid attributes of Media class."""
if node.name not in ('js', ):
return False
parent = node.parent
while parent and not isinstance(parent, ScopedClass):
parent = parent.parent
if parent is None or parent.name != "Media":
return False
return True
def is_templatetags_module_valid_constant(node):
"""Suppress warnings for valid constants in templatetags module."""
if node.name not in ('register', ):
return False
parent = node.parent
while not isinstance(parent, Module):
parent = parent.parent
if "templatetags." not in parent.name:
return False
return True
def is_urls_module_valid_constant(node):
"""Suppress warnings for valid constants in urls module."""
if node.name not in ('urlpatterns', 'app_name'):
return False
parent = node.parent
while not isinstance(parent, Module):
parent = parent.parent
if not parent.name.endswith('urls'):
return False
return True
def allow_meta_protected_access(node):
if django_version >= (1, 8):
return node.attrname == '_meta'
return False
def is_class(class_name):
"""Shortcut for node_is_subclass."""
return lambda node: node_is_subclass(node, class_name)
def wrap(orig_method, with_method):
def wrap_func(*args, **kwargs):
with_method(orig_method, *args, **kwargs)
return wrap_func
def is_wsgi_application(node):
frame = node.frame()
return node.name == 'application' and isinstance(frame, Module) and \
(frame.name == 'wsgi' or frame.path[0].endswith('wsgi.py') or frame.file.endswith('wsgi.py'))
# Compat helpers
def pylint_newstyle_classdef_compat(linter, warning_name, augment):
if not hasattr(NewStyleConflictChecker, 'visit_classdef'):
return
suppress_message(linter, getattr(NewStyleConflictChecker, 'visit_classdef'), warning_name, augment)
# augment things
def apply_augmentations(linter):
"""Apply augmentation and suppression rules."""
augment_visit(linter, TypeChecker.visit_attribute, foreign_key_sets)
augment_visit(linter, TypeChecker.visit_attribute, foreign_key_ids)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_field_display_method)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_style_attribute)
suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_urls_module_valid_constant)
# supress errors when accessing magical class attributes
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manager_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_admin_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_field_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_charfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_datefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_decimalfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_filefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_imagefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_ipfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_slugfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_foreignkeyfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manytomanyfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_onetoonefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_form_attribute)
for parents, attrs in VIEW_ATTRS:
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', generic_is_view_attribute(parents, attrs))
# formviews have too many ancestors, there's nothing the user of the library can do about that
suppress_message(linter, MisdesignChecker.visit_classdef, 'too-many-ancestors',
is_class('django.views.generic.edit.FormView'))
# model forms have no __init__ method anywhere in their bases
suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_class('django.forms.models.ModelForm'))
# Meta
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_meta_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_meta_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_meta_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_meta_subclass)
suppress_message(linter, ClassChecker.visit_attribute, 'protected-access', allow_meta_protected_access)
# Media
suppress_message(linter, NameChecker.visit_assignname, 'C0103', is_model_media_valid_attributes)
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_media_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_media_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_media_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_media_subclass)
# Admin
# Too many public methods (40+/20)
# TODO: Count public methods of django.contrib.admin.options.ModelAdmin and increase
# MisdesignChecker.config.max_public_methods to this value to count only user' methods.
# nb_public_methods = 0
# for method in node.methods():
# if not method.name.startswith('_'):
# nb_public_methods += 1
suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_admin_subclass)
# Tests
suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_test_case_subclass)
# View
# Method could be a function (get, post)
suppress_message(linter, ClassChecker.leave_functiondef, 'no-self-use',
is_model_view_subclass_method_shouldnt_be_function)
# Unused argument 'request' (get, post)
suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument',
is_model_view_subclass_unused_argument)
suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument', is_argument_named_request)
# django-mptt
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_mpttmeta_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_mpttmeta_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_model_mpttmeta_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_mpttmeta_subclass)
# factory_boy's DjangoModelFactory
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_factory)
suppress_message(linter, ClassChecker.visit_functiondef, 'no-self-argument', is_factory_post_generation_method)
# ForeignKey and OneToOneField
# Must update this in a thread safe way to support the parallel option on pylint (-j)
current_leave_module = VariablesChecker.leave_module
if current_leave_module.__name__ == 'leave_module':
# current_leave_module is not wrapped
# Two threads may hit the next assignment concurrently, but the result is the same
VariablesChecker.leave_module = wrap(current_leave_module, ignore_import_warnings_for_related_fields)
# VariablesChecker.leave_module is now wrapped
# else VariablesChecker.leave_module is already wrapped
# wsgi.py
suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_wsgi_application)
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
is_model_field_display_method
|
python
|
def is_model_field_display_method(node):
if not node.attrname.endswith('_display'):
return False
if not node.attrname.startswith('get_'):
return False
if node.last_child():
# TODO: could validate the names of the fields on the model rather than
# blindly accepting get_*_display
try:
for cls in node.last_child().inferred():
if node_is_subclass(cls, 'django.db.models.base.Model', '.Model'):
return True
except InferenceError:
return False
return False
|
Accept model's fields with get_*_display names.
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L663-L679
| null |
"""Augmentations."""
# pylint: disable=invalid-name
import itertools
from astroid import InferenceError
from astroid.objects import Super
from astroid.nodes import ClassDef, ImportFrom, Attribute
from astroid.scoped_nodes import ClassDef as ScopedClass, Module
from pylint.checkers.base import DocStringChecker, NameChecker
from pylint.checkers.design_analysis import MisdesignChecker
from pylint.checkers.classes import ClassChecker
from pylint.checkers.newstyle import NewStyleConflictChecker
from pylint.checkers.variables import VariablesChecker
from pylint.checkers.typecheck import TypeChecker
from pylint.checkers.variables import ScopeConsumer
from pylint_plugin_utils import augment_visit, suppress_message
from django import VERSION as django_version
from django.views.generic.base import View, RedirectView, ContextMixin
from django.views.generic.dates import DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin
from django.views.generic.detail import SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin
from django.views.generic.edit import DeletionMixin, FormMixin, ModelFormMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
from django.utils import termcolors
from pylint_django.utils import node_is_subclass, PY3
# Note: it would have been nice to import the Manager object from Django and
# get its attributes that way - and this used to be the method - but unfortunately
# there's no guarantee that Django is properly configured at that stage, and importing
# anything from the django.db package causes an ImproperlyConfigured exception.
# Therefore we'll fall back on a hard-coded list of attributes which won't be as accurate,
# but this is not 100% accurate anyway.
MANAGER_ATTRS = {
'none',
'all',
'count',
'dates',
'distinct',
'extra',
'get',
'get_or_create',
'get_queryset',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'latest',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
}
QS_ATTRS = {
'filter',
'exclude',
'annotate',
'order_by',
'reverse',
'distinct',
'values',
'values_list',
'dates',
'datetimes',
'none',
'all',
'select_related',
'prefetch_related',
'extra',
'defer',
'only',
'using',
'select_for_update',
'raw',
'get',
'create',
'get_or_create',
'update_or_create',
'bulk_create',
'count',
'in_bulk',
'iterator',
'latest',
'earliest',
'first',
'last',
'aggregate',
'exists',
'update',
'delete',
'as_manager',
'expression',
'output_field',
}
MODELADMIN_ATTRS = {
# options
'actions',
'actions_on_top',
'actions_on_bottom',
'actions_selection_counter',
'date_hierarchy',
'empty_value_display',
'exclude',
'fields',
'fieldsets',
'filter_horizontal',
'filter_vertical',
'form',
'formfield_overrides',
'inlines',
'list_display',
'list_display_links',
'list_editable',
'list_filter',
'list_max_show_all',
'list_per_page',
'list_select_related',
'ordering',
'paginator',
'prepopulated_fields',
'preserve_filters',
'radio_fields',
'raw_id_fields',
'readonly_fields',
'save_as',
'save_on_top',
'search_fields',
'show_full_result_count',
'view_on_site',
# template options
'add_form_template',
'change_form_template',
'change_list_template',
'delete_confirmation_template',
'delete_selected_confirmation_template',
'object_history_template',
}
MODEL_ATTRS = {
'id',
'DoesNotExist',
'MultipleObjectsReturned',
'_base_manager',
'_default_manager',
'_meta',
'delete',
'get_next_by_date',
'get_previous_by_date',
'objects',
'save',
}
FIELD_ATTRS = {
'null',
'blank',
'choices',
'db_column',
'db_index',
'db_tablespace',
'default',
'editable',
'error_messages',
'help_text',
'primary_key',
'unique',
'unique_for_date',
'unique_for_month',
'unique_for_year',
'verbose_name',
'validators',
}
CHAR_FIELD_ATTRS = {
'max_length',
}
DATE_FIELD_ATTRS = {
'auto_now',
'auto_now_add',
}
DECIMAL_FIELD_ATTRS = {
'max_digits',
'decimal_places',
}
FILE_FIELD_ATTRS = {
'upload_to',
'storage',
}
IMAGE_FIELD_ATTRS = {
'height_field',
'width_field',
}
IP_FIELD_ATTRS = {
'protocol',
'unpack_ipv4',
}
SLUG_FIELD_ATTRS = {
'allow_unicode',
}
FOREIGNKEY_FIELD_ATTRS = {
'limit_choices_to',
'related_name',
'related_query_name',
'to_field',
'db_constraint',
'swappable',
}
MANYTOMANY_FIELD_ATTRS = {
'add',
'clear',
'related_name',
'related_query_name',
'limit_choices_to',
'symmetrical',
'through',
'through_fields',
'db_table',
'db_constraint',
'swappable',
}
ONETOONE_FIELD_ATTRS = {
'parent_link',
}
STYLE_ATTRS = set(itertools.chain.from_iterable(termcolors.PALETTES.values()))
VIEW_ATTRS = {
(
(
'{}.{}'.format(cls.__module__, cls.__name__),
'.{}'.format(cls.__name__)
),
tuple(cls.__dict__.keys())
) for cls in (
View, RedirectView, ContextMixin,
DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin,
SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin,
DeletionMixin, FormMixin, ModelFormMixin,
MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)
}
FORM_ATTRS = {
'declared_fields',
}
def ignore_import_warnings_for_related_fields(orig_method, self, node):
    """
    Replaces the leave_module method on the VariablesChecker class to
    prevent unused-import warnings which are caused by the ForeignKey
    and OneToOneField transformations. By replacing the nodes in the
    AST with their type rather than the django field, imports of the
    form 'from django.db.models import OneToOneField' raise an unused-import
    warning
    """
    consumer = self._to_consume[0]  # pylint: disable=W0212
    # we can disable this warning ('Access to a protected member _to_consume of a client class')
    # as it's not actually a client class, but rather, this method is being monkey patched
    # onto the class and so the access is valid
    new_things = {}
    # Python 2/3 compat: dict iteration method differs.
    iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems
    for name, stmts in iterat():
        if isinstance(stmts[0], ImportFrom):
            # drop ForeignKey/OneToOneField imports from the "to consume" set
            # so the checker never reports them as unused
            if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
                continue
        new_things[name] = stmts
    # rebuild the consumer with the filtered name set, then delegate to the
    # original (wrapped) leave_module
    consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type)  # pylint: disable=W0212
    self._to_consume = [consumer]  # pylint: disable=W0212
    return orig_method(self, node)
def foreign_key_sets(chain, node):
    """
    When a Django model has a ForeignKey to another model, the target
    of the foreign key gets a '<modelname>_set' attribute for accessing
    a queryset of the model owning the foreign key - eg:

        class ModelA(models.Model):
            pass

        class ModelB(models.Model):
            a = models.ForeignKey(ModelA)

    Now, ModelA instances will have a modelb_set attribute.

    It's also possible to explicitly name the relationship using the related_name argument
    to the ForeignKey constructor. As it's impossible to know this without inspecting all
    models before processing, we'll instead do a "best guess" approach and see if the attribute
    being accessed goes on to be used as a queryset. This is via 'duck typing': if the method
    called on the attribute being accessed is something we might find in a queryset, we'll
    warn.
    """
    quack = False
    if node.attrname in MANAGER_ATTRS or node.attrname.endswith('_set'):
        # if this is a X_set attribute, that's a pretty strong signal that this
        # is the default Django name, rather than one set by related_name
        quack = True
    else:
        # otherwise "duck type": if a manager/queryset method is immediately
        # called on the attribute, assume it is a related_name reverse accessor
        if isinstance(node.parent, Attribute):
            func_name = getattr(node.parent, 'attrname', None)
            if func_name in MANAGER_ATTRS:
                quack = True
    if quack:
        children = list(node.get_children())
        for child in children:
            try:
                inferred_cls = child.inferred()
            except InferenceError:
                # astroid could not infer the expression; fall through and let
                # the wrapped checker decide
                pass
            else:
                for cls in inferred_cls:
                    if (node_is_subclass(cls,
                                         'django.db.models.manager.Manager',
                                         'django.db.models.base.Model',
                                         '.Model',
                                         'django.db.models.fields.related.ForeignObject')):
                        # This means that we are looking at a subclass of models.Model
                        # and something is trying to access a <something>_set attribute.
                        # Since this could exist, we will return so as not to raise an
                        # error.
                        return
    chain()
def foreign_key_ids(chain, node):
    """Pass through ``<field>_id`` attribute accesses without warning.

    Django adds an implicit ``<fk_name>_id`` attribute for every foreign
    key, so names ending in ``_id`` skip the rest of the checker chain.
    """
    looks_like_fk_id = node.attrname.endswith('_id')
    if not looks_like_fk_id:
        chain()
def is_model_admin_subclass(node):
    """Return True if *node* is a top-level ``*Admin`` subclass of ModelAdmin."""
    named_like_admin = node.name.endswith('Admin')
    nested_in_class = isinstance(node.parent, ClassDef)
    if named_like_admin and not nested_in_class:
        return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin')
    return False
def is_model_media_subclass(node):
"""Checks that node is derivative of Media class."""
if node.name != 'Media' or not isinstance(node.parent, ClassDef):
return False
parents = ('django.contrib.admin.options.ModelAdmin',
'django.forms.widgets.Media',
'django.db.models.base.Model',
'.Model', # for the transformed version used in this plugin
'django.forms.forms.Form',
'.Form',
'django.forms.widgets.Widget',
'.Widget',
'django.forms.models.ModelForm',
'.ModelForm')
return node_is_subclass(node.parent, *parents)
def is_model_meta_subclass(node):
"""Checks that node is derivative of Meta class."""
if node.name != 'Meta' or not isinstance(node.parent, ClassDef):
return False
parents = ('.Model', # for the transformed version used here
'django.db.models.base.Model',
'.Form',
'django.forms.forms.Form',
'.ModelForm',
'django.forms.models.ModelForm',
'rest_framework.serializers.BaseSerializer',
'rest_framework.generics.GenericAPIView',
'rest_framework.viewsets.ReadOnlyModelViewSet',
'rest_framework.viewsets.ModelViewSet',
'django_filters.filterset.FilterSet',
'factory.django.DjangoModelFactory',)
return node_is_subclass(node.parent, *parents)
def is_model_factory(node):
    """Check that *node* is an attribute access on a factory_boy factory.

    Matches DjangoModelFactory, SubFactory and LazyFunction (directly or
    via subclassing). Returns False whenever astroid cannot infer the
    expression.
    """
    try:
        parent_classes = node.expr.inferred()
    except Exception:  # narrowed from bare except: still broad because
        # inference can raise many error types, but no longer swallows
        # SystemExit/KeyboardInterrupt
        return False
    factory_bases = ('factory.declarations.LazyFunction',
                     'factory.declarations.SubFactory',
                     'factory.django.DjangoModelFactory')
    for parent_class in parent_classes:
        try:
            if parent_class.qname() in factory_bases:
                return True
            if node_is_subclass(parent_class, *factory_bases):
                return True
        except AttributeError:
            # inferred object without qname(); skip it
            continue
    return False
def is_factory_post_generation_method(node):
    """Return True if the function is decorated with factory_boy's
    ``@factory.post_generation``."""
    if not node.decorators:
        return False
    for dec in node.decorators.get_children():
        try:
            candidates = dec.inferred()
        except InferenceError:
            continue
        if any(target.qname() == 'factory.helpers.post_generation'
               for target in candidates):
            return True
    return False
def is_model_mpttmeta_subclass(node):
"""Checks that node is derivative of MPTTMeta class."""
if node.name != 'MPTTMeta' or not isinstance(node.parent, ClassDef):
return False
parents = ('django.db.models.base.Model',
'.Model', # for the transformed version used in this plugin
'django.forms.forms.Form',
'.Form',
'django.forms.models.ModelForm',
'.ModelForm')
return node_is_subclass(node.parent, *parents)
def _attribute_is_magic(node, attrs, parents):
    """Check that *node* accesses one of the "magic" attribute names in
    *attrs* on an object whose inferred class matches one of *parents*
    (a tuple of qualified class names), directly or by subclassing.
    """
    # Cheap name test first, before any astroid inference is attempted.
    if node.attrname not in attrs:
        return False
    # The expression the attribute is accessed on is the node's last child;
    # without one there is nothing to infer.
    if not node.last_child():
        return False
    try:
        for cls in node.last_child().inferred():
            if isinstance(cls, Super):
                # super() proxy: inspect the class it was constructed for
                cls = cls._self_class  # pylint: disable=protected-access
            if node_is_subclass(cls, *parents) or cls.qname() in parents:
                return True
    except InferenceError:
        # inference can fail on dynamic code; treat as "not magic"
        pass
    return False
def is_style_attribute(node):
parents = ('django.core.management.color.Style', )
return _attribute_is_magic(node, STYLE_ATTRS, parents)
def is_manager_attribute(node):
    """Check that *node* is an attribute of a Manager or QuerySet class."""
    manager_like = (
        'django.db.models.manager.Manager',
        '.Manager',
        'factory.base.BaseFactory.build',
        'django.db.models.query.QuerySet',
        '.QuerySet',
    )
    return _attribute_is_magic(node, MANAGER_ATTRS | QS_ATTRS, manager_like)
def is_admin_attribute(node):
"""Checks that node is attribute of BaseModelAdmin."""
parents = ('django.contrib.admin.options.BaseModelAdmin',
'.BaseModelAdmin')
return _attribute_is_magic(node, MODELADMIN_ATTRS, parents)
def is_model_attribute(node):
"""Checks that node is attribute of Model."""
parents = ('django.db.models.base.Model',
'.Model')
return _attribute_is_magic(node, MODEL_ATTRS, parents)
def is_field_attribute(node):
"""Checks that node is attribute of Field."""
parents = ('django.db.models.fields.Field',
'.Field')
return _attribute_is_magic(node, FIELD_ATTRS, parents)
def is_charfield_attribute(node):
"""Checks that node is attribute of CharField."""
parents = ('django.db.models.fields.CharField',
'.CharField')
return _attribute_is_magic(node, CHAR_FIELD_ATTRS, parents)
def is_datefield_attribute(node):
"""Checks that node is attribute of DateField."""
parents = ('django.db.models.fields.DateField',
'.DateField')
return _attribute_is_magic(node, DATE_FIELD_ATTRS, parents)
def is_decimalfield_attribute(node):
"""Checks that node is attribute of DecimalField."""
parents = ('django.db.models.fields.DecimalField',
'.DecimalField')
return _attribute_is_magic(node, DECIMAL_FIELD_ATTRS, parents)
def is_filefield_attribute(node):
"""Checks that node is attribute of FileField."""
parents = ('django.db.models.fields.files.FileField',
'.FileField')
return _attribute_is_magic(node, FILE_FIELD_ATTRS, parents)
def is_imagefield_attribute(node):
"""Checks that node is attribute of ImageField."""
parents = ('django.db.models.fields.files.ImageField',
'.ImageField')
return _attribute_is_magic(node, IMAGE_FIELD_ATTRS, parents)
def is_ipfield_attribute(node):
"""Checks that node is attribute of GenericIPAddressField."""
parents = ('django.db.models.fields.GenericIPAddressField',
'.GenericIPAddressField')
return _attribute_is_magic(node, IP_FIELD_ATTRS, parents)
def is_slugfield_attribute(node):
"""Checks that node is attribute of SlugField."""
parents = ('django.db.models.fields.SlugField',
'.SlugField')
return _attribute_is_magic(node, SLUG_FIELD_ATTRS, parents)
def is_foreignkeyfield_attribute(node):
"""Checks that node is attribute of ForeignKey."""
parents = ('django.db.models.fields.related.ForeignKey',
'.ForeignKey')
return _attribute_is_magic(node, FOREIGNKEY_FIELD_ATTRS, parents)
def is_manytomanyfield_attribute(node):
"""Checks that node is attribute of ManyToManyField."""
parents = ('django.db.models.fields.related.ManyToManyField',
'.ManyToManyField')
return _attribute_is_magic(node, MANYTOMANY_FIELD_ATTRS, parents)
def is_onetoonefield_attribute(node):
"""Checks that node is attribute of OneToOneField."""
parents = ('django.db.models.fields.related.OneToOneField',
'.OneToOneField')
return _attribute_is_magic(node, ONETOONE_FIELD_ATTRS, parents)
def is_form_attribute(node):
"""Checks that node is attribute of Form."""
parents = ('django.forms.forms.Form', 'django.forms.models.ModelForm')
return _attribute_is_magic(node, FORM_ATTRS, parents)
def is_model_test_case_subclass(node):
    """Check that *node* is a subclass of django's TestCase.

    NOTE(review): the guard uses ``and``, so a class that is either named
    ``*Test`` or nested inside another class still reaches the subclass
    check; sibling helpers (e.g. is_model_admin_subclass) use the stricter
    ``name mismatch or nested`` form — confirm whether the asymmetry is
    intentional before changing it.
    """
    if not node.name.endswith('Test') and not isinstance(node.parent, ClassDef):
        return False
    return node_is_subclass(node, 'django.test.testcases.TestCase')
def generic_is_view_attribute(parents, attrs):
    """Build an ``is_X_attribute``-style checker bound to the given
    *parents* (view class qnames) and *attrs* (attribute names)."""
    return lambda node: _attribute_is_magic(node, attrs, parents)
def is_model_view_subclass_method_shouldnt_be_function(node):
    """Check that *node* is a ``get`` or ``post`` method of a View subclass
    (used to suppress no-self-use for these handlers)."""
    if node.name not in ('get', 'post'):
        return False
    parent = node.parent
    # walk up the AST to the nearest enclosing class scope, if any
    while parent and not isinstance(parent, ScopedClass):
        parent = parent.parent
    subclass = ('django.views.View',
                'django.views.generic.View',
                'django.views.generic.base.View',)
    return parent is not None and node_is_subclass(parent, *subclass)
def is_model_view_subclass_unused_argument(node):
    """
    Check that *node* is a get/post method of a View subclass that takes a
    ``request`` argument (so its unused-argument warning can be suppressed).

    TODO: Bad checkings, need to be more smart.
    """
    return (is_model_view_subclass_method_shouldnt_be_function(node)
            and is_argument_named_request(node))
def is_argument_named_request(node):
    """Ignore unused-argument warnings when the argument is named 'request'."""
    return any(name == 'request' for name in node.argnames())
def is_model_media_valid_attributes(node):
    """Suppress naming warnings for the ``js`` attribute of a Media class."""
    if node.name != 'js':
        return False
    enclosing = node.parent
    # climb to the nearest enclosing class scope, if any
    while enclosing and not isinstance(enclosing, ScopedClass):
        enclosing = enclosing.parent
    return enclosing is not None and enclosing.name == "Media"
def is_templatetags_module_valid_constant(node):
    """Suppress naming warnings for the ``register`` object in
    templatetags modules."""
    if node.name != 'register':
        return False
    module = node.parent
    # climb to the enclosing module node
    while not isinstance(module, Module):
        module = module.parent
    return "templatetags." in module.name
def is_urls_module_valid_constant(node):
    """Suppress invalid-name warnings for well-known names in urls modules."""
    if node.name in ('urlpatterns', 'app_name'):
        module = node.parent
        # climb to the enclosing module node
        while not isinstance(module, Module):
            module = module.parent
        return module.name.endswith('urls')
    return False
def allow_meta_protected_access(node):
    """Permit protected-access warnings to be suppressed for ``_meta``
    when running under Django >= 1.8."""
    return django_version >= (1, 8) and node.attrname == '_meta'
def is_class(class_name):
    """Shortcut building a one-argument node_is_subclass predicate."""
    def _checker(node):
        return node_is_subclass(node, class_name)
    return _checker
def wrap(orig_method, with_method):
    """Wrap *orig_method* so that every call is routed through *with_method*.

    *with_method* receives the original callable as its first argument,
    followed by the call's own arguments, and is responsible for invoking
    it. Deliberately does NOT use functools.wraps: apply_augmentations
    detects an already-wrapped method via ``__name__ == 'leave_module'``,
    which relies on the wrapper keeping its own name.
    """
    def wrap_func(*args, **kwargs):
        # bug fix: propagate the delegate's return value instead of
        # silently discarding it
        return with_method(orig_method, *args, **kwargs)
    return wrap_func
def is_wsgi_application(node):
    """Return True if *node* is the module-level ``application`` object of a
    ``wsgi`` module (matched by module name, path entry or file name)."""
    frame = node.frame()
    return node.name == 'application' and isinstance(frame, Module) and \
        (frame.name == 'wsgi' or frame.path[0].endswith('wsgi.py') or frame.file.endswith('wsgi.py'))
# Compat helpers
def pylint_newstyle_classdef_compat(linter, warning_name, augment):
if not hasattr(NewStyleConflictChecker, 'visit_classdef'):
return
suppress_message(linter, getattr(NewStyleConflictChecker, 'visit_classdef'), warning_name, augment)
# augment things
def apply_augmentations(linter):
    """Apply augmentation and suppression rules."""
    augment_visit(linter, TypeChecker.visit_attribute, foreign_key_sets)
    augment_visit(linter, TypeChecker.visit_attribute, foreign_key_ids)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_field_display_method)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_style_attribute)
    suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_urls_module_valid_constant)
    # suppress errors when accessing magical class attributes
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manager_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_admin_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_field_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_charfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_datefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_decimalfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_filefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_imagefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_ipfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_slugfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_foreignkeyfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manytomanyfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_onetoonefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_form_attribute)
    for parents, attrs in VIEW_ATTRS:
        suppress_message(linter, TypeChecker.visit_attribute, 'no-member', generic_is_view_attribute(parents, attrs))
    # formviews have too many ancestors, there's nothing the user of the library can do about that
    suppress_message(linter, MisdesignChecker.visit_classdef, 'too-many-ancestors',
                     is_class('django.views.generic.edit.FormView'))
    # model forms have no __init__ method anywhere in their bases
    suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_class('django.forms.models.ModelForm'))
    # Meta
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_meta_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_meta_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_meta_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_meta_subclass)
    suppress_message(linter, ClassChecker.visit_attribute, 'protected-access', allow_meta_protected_access)
    # Media
    suppress_message(linter, NameChecker.visit_assignname, 'C0103', is_model_media_valid_attributes)
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_media_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_media_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_media_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_media_subclass)
    # Admin
    # Too many public methods (40+/20)
    # TODO: Count public methods of django.contrib.admin.options.ModelAdmin and increase
    # MisdesignChecker.config.max_public_methods to this value to count only user' methods.
    # nb_public_methods = 0
    # for method in node.methods():
    #     if not method.name.startswith('_'):
    #         nb_public_methods += 1
    suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_admin_subclass)
    # Tests
    suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_test_case_subclass)
    # View
    # Method could be a function (get, post)
    suppress_message(linter, ClassChecker.leave_functiondef, 'no-self-use',
                     is_model_view_subclass_method_shouldnt_be_function)
    # Unused argument 'request' (get, post)
    suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument',
                     is_model_view_subclass_unused_argument)
    suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument', is_argument_named_request)
    # django-mptt
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_mpttmeta_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_mpttmeta_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_model_mpttmeta_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_mpttmeta_subclass)
    # factory_boy's DjangoModelFactory
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_factory)
    suppress_message(linter, ClassChecker.visit_functiondef, 'no-self-argument', is_factory_post_generation_method)
    # ForeignKey and OneToOneField
    # Must update this in a thread safe way to support the parallel option on pylint (-j)
    current_leave_module = VariablesChecker.leave_module
    if current_leave_module.__name__ == 'leave_module':
        # current_leave_module is not wrapped
        # Two threads may hit the next assignment concurrently, but the result is the same
        VariablesChecker.leave_module = wrap(current_leave_module, ignore_import_warnings_for_related_fields)
        # VariablesChecker.leave_module is now wrapped
    # else VariablesChecker.leave_module is already wrapped
    # wsgi.py
    suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_wsgi_application)
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
is_model_media_valid_attributes
|
python
|
def is_model_media_valid_attributes(node):
if node.name not in ('js', ):
return False
parent = node.parent
while parent and not isinstance(parent, ScopedClass):
parent = parent.parent
if parent is None or parent.name != "Media":
return False
return True
|
Suppress warnings for valid attributes of Media class.
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L682-L694
| null |
"""Augmentations."""
# pylint: disable=invalid-name
import itertools
from astroid import InferenceError
from astroid.objects import Super
from astroid.nodes import ClassDef, ImportFrom, Attribute
from astroid.scoped_nodes import ClassDef as ScopedClass, Module
from pylint.checkers.base import DocStringChecker, NameChecker
from pylint.checkers.design_analysis import MisdesignChecker
from pylint.checkers.classes import ClassChecker
from pylint.checkers.newstyle import NewStyleConflictChecker
from pylint.checkers.variables import VariablesChecker
from pylint.checkers.typecheck import TypeChecker
from pylint.checkers.variables import ScopeConsumer
from pylint_plugin_utils import augment_visit, suppress_message
from django import VERSION as django_version
from django.views.generic.base import View, RedirectView, ContextMixin
from django.views.generic.dates import DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin
from django.views.generic.detail import SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin
from django.views.generic.edit import DeletionMixin, FormMixin, ModelFormMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
from django.utils import termcolors
from pylint_django.utils import node_is_subclass, PY3
# Note: it would have been nice to import the Manager object from Django and
# get its attributes that way - and this used to be the method - but unfortunately
# there's no guarantee that Django is properly configured at that stage, and importing
# anything from the django.db package causes an ImproperlyConfigured exception.
# Therefore we'll fall back on a hard-coded list of attributes which won't be as accurate,
# but this is not 100% accurate anyway.
MANAGER_ATTRS = {
'none',
'all',
'count',
'dates',
'distinct',
'extra',
'get',
'get_or_create',
'get_queryset',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'latest',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
}
QS_ATTRS = {
'filter',
'exclude',
'annotate',
'order_by',
'reverse',
'distinct',
'values',
'values_list',
'dates',
'datetimes',
'none',
'all',
'select_related',
'prefetch_related',
'extra',
'defer',
'only',
'using',
'select_for_update',
'raw',
'get',
'create',
'get_or_create',
'update_or_create',
'bulk_create',
'count',
'in_bulk',
'iterator',
'latest',
'earliest',
'first',
'last',
'aggregate',
'exists',
'update',
'delete',
'as_manager',
'expression',
'output_field',
}
MODELADMIN_ATTRS = {
# options
'actions',
'actions_on_top',
'actions_on_bottom',
'actions_selection_counter',
'date_hierarchy',
'empty_value_display',
'exclude',
'fields',
'fieldsets',
'filter_horizontal',
'filter_vertical',
'form',
'formfield_overrides',
'inlines',
'list_display',
'list_display_links',
'list_editable',
'list_filter',
'list_max_show_all',
'list_per_page',
'list_select_related',
'ordering',
'paginator',
'prepopulated_fields',
'preserve_filters',
'radio_fields',
'raw_id_fields',
'readonly_fields',
'save_as',
'save_on_top',
'search_fields',
'show_full_result_count',
'view_on_site',
# template options
'add_form_template',
'change_form_template',
'change_list_template',
'delete_confirmation_template',
'delete_selected_confirmation_template',
'object_history_template',
}
MODEL_ATTRS = {
'id',
'DoesNotExist',
'MultipleObjectsReturned',
'_base_manager',
'_default_manager',
'_meta',
'delete',
'get_next_by_date',
'get_previous_by_date',
'objects',
'save',
}
FIELD_ATTRS = {
'null',
'blank',
'choices',
'db_column',
'db_index',
'db_tablespace',
'default',
'editable',
'error_messages',
'help_text',
'primary_key',
'unique',
'unique_for_date',
'unique_for_month',
'unique_for_year',
'verbose_name',
'validators',
}
CHAR_FIELD_ATTRS = {
'max_length',
}
DATE_FIELD_ATTRS = {
'auto_now',
'auto_now_add',
}
DECIMAL_FIELD_ATTRS = {
'max_digits',
'decimal_places',
}
FILE_FIELD_ATTRS = {
'upload_to',
'storage',
}
IMAGE_FIELD_ATTRS = {
'height_field',
'width_field',
}
IP_FIELD_ATTRS = {
'protocol',
'unpack_ipv4',
}
SLUG_FIELD_ATTRS = {
'allow_unicode',
}
FOREIGNKEY_FIELD_ATTRS = {
'limit_choices_to',
'related_name',
'related_query_name',
'to_field',
'db_constraint',
'swappable',
}
MANYTOMANY_FIELD_ATTRS = {
'add',
'clear',
'related_name',
'related_query_name',
'limit_choices_to',
'symmetrical',
'through',
'through_fields',
'db_table',
'db_constraint',
'swappable',
}
ONETOONE_FIELD_ATTRS = {
'parent_link',
}
STYLE_ATTRS = set(itertools.chain.from_iterable(termcolors.PALETTES.values()))
VIEW_ATTRS = {
(
(
'{}.{}'.format(cls.__module__, cls.__name__),
'.{}'.format(cls.__name__)
),
tuple(cls.__dict__.keys())
) for cls in (
View, RedirectView, ContextMixin,
DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin,
SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin,
DeletionMixin, FormMixin, ModelFormMixin,
MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)
}
FORM_ATTRS = {
'declared_fields',
}
def ignore_import_warnings_for_related_fields(orig_method, self, node):
"""
Replaces the leave_module method on the VariablesChecker class to
prevent unused-import warnings which are caused by the ForeignKey
and OneToOneField transformations. By replacing the nodes in the
AST with their type rather than the django field, imports of the
form 'from django.db.models import OneToOneField' raise an unused-import
warning
"""
consumer = self._to_consume[0] # pylint: disable=W0212
# we can disable this warning ('Access to a protected member _to_consume of a client class')
# as it's not actually a client class, but rather, this method is being monkey patched
# onto the class and so the access is valid
new_things = {}
iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems
for name, stmts in iterat():
if isinstance(stmts[0], ImportFrom):
if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
continue
new_things[name] = stmts
consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type) # pylint: disable=W0212
self._to_consume = [consumer] # pylint: disable=W0212
return orig_method(self, node)
def foreign_key_sets(chain, node):
"""
When a Django model has a ForeignKey to another model, the target
of the foreign key gets a '<modelname>_set' attribute for accessing
a queryset of the model owning the foreign key - eg:
class ModelA(models.Model):
pass
class ModelB(models.Model):
a = models.ForeignKey(ModelA)
Now, ModelA instances will have a modelb_set attribute.
It's also possible to explicitly name the relationship using the related_name argument
to the ForeignKey constructor. As it's impossible to know this without inspecting all
models before processing, we'll instead do a "best guess" approach and see if the attribute
being accessed goes on to be used as a queryset. This is via 'duck typing': if the method
called on the attribute being accessed is something we might find in a queryset, we'll
warn.
"""
quack = False
if node.attrname in MANAGER_ATTRS or node.attrname.endswith('_set'):
# if this is a X_set method, that's a pretty strong signal that this is the default
# Django name, rather than one set by related_name
quack = True
else:
# we will
if isinstance(node.parent, Attribute):
func_name = getattr(node.parent, 'attrname', None)
if func_name in MANAGER_ATTRS:
quack = True
if quack:
children = list(node.get_children())
for child in children:
try:
inferred_cls = child.inferred()
except InferenceError:
pass
else:
for cls in inferred_cls:
if (node_is_subclass(cls,
'django.db.models.manager.Manager',
'django.db.models.base.Model',
'.Model',
'django.db.models.fields.related.ForeignObject')):
# This means that we are looking at a subclass of models.Model
# and something is trying to access a <something>_set attribute.
# Since this could exist, we will return so as not to raise an
# error.
return
chain()
def foreign_key_ids(chain, node):
if node.attrname.endswith('_id'):
return
chain()
def is_model_admin_subclass(node):
"""Checks that node is derivative of ModelAdmin class."""
if node.name[-5:] != 'Admin' or isinstance(node.parent, ClassDef):
return False
return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin')
def is_model_media_subclass(node):
"""Checks that node is derivative of Media class."""
if node.name != 'Media' or not isinstance(node.parent, ClassDef):
return False
parents = ('django.contrib.admin.options.ModelAdmin',
'django.forms.widgets.Media',
'django.db.models.base.Model',
'.Model', # for the transformed version used in this plugin
'django.forms.forms.Form',
'.Form',
'django.forms.widgets.Widget',
'.Widget',
'django.forms.models.ModelForm',
'.ModelForm')
return node_is_subclass(node.parent, *parents)
def is_model_meta_subclass(node):
"""Checks that node is derivative of Meta class."""
if node.name != 'Meta' or not isinstance(node.parent, ClassDef):
return False
parents = ('.Model', # for the transformed version used here
'django.db.models.base.Model',
'.Form',
'django.forms.forms.Form',
'.ModelForm',
'django.forms.models.ModelForm',
'rest_framework.serializers.BaseSerializer',
'rest_framework.generics.GenericAPIView',
'rest_framework.viewsets.ReadOnlyModelViewSet',
'rest_framework.viewsets.ModelViewSet',
'django_filters.filterset.FilterSet',
'factory.django.DjangoModelFactory',)
return node_is_subclass(node.parent, *parents)
def is_model_factory(node):
"""Checks that node is derivative of DjangoModelFactory or SubFactory class."""
try:
parent_classes = node.expr.inferred()
except: # noqa: E722, pylint: disable=bare-except
return False
parents = ('factory.declarations.LazyFunction',
'factory.declarations.SubFactory',
'factory.django.DjangoModelFactory')
for parent_class in parent_classes:
try:
if parent_class.qname() in parents:
return True
if node_is_subclass(parent_class, *parents):
return True
except AttributeError:
continue
return False
def is_factory_post_generation_method(node):
if not node.decorators:
return False
for decorator in node.decorators.get_children():
try:
inferred = decorator.inferred()
except InferenceError:
continue
for target in inferred:
if target.qname() == 'factory.helpers.post_generation':
return True
return False
def is_model_mpttmeta_subclass(node):
"""Checks that node is derivative of MPTTMeta class."""
if node.name != 'MPTTMeta' or not isinstance(node.parent, ClassDef):
return False
parents = ('django.db.models.base.Model',
'.Model', # for the transformed version used in this plugin
'django.forms.forms.Form',
'.Form',
'django.forms.models.ModelForm',
'.ModelForm')
return node_is_subclass(node.parent, *parents)
def _attribute_is_magic(node, attrs, parents):
"""Checks that node is an attribute used inside one of allowed parents"""
if node.attrname not in attrs:
return False
if not node.last_child():
return False
try:
for cls in node.last_child().inferred():
if isinstance(cls, Super):
cls = cls._self_class # pylint: disable=protected-access
if node_is_subclass(cls, *parents) or cls.qname() in parents:
return True
except InferenceError:
pass
return False
def is_style_attribute(node):
parents = ('django.core.management.color.Style', )
return _attribute_is_magic(node, STYLE_ATTRS, parents)
def is_manager_attribute(node):
"""Checks that node is attribute of Manager or QuerySet class."""
parents = ('django.db.models.manager.Manager',
'.Manager',
'factory.base.BaseFactory.build',
'django.db.models.query.QuerySet',
'.QuerySet')
return _attribute_is_magic(node, MANAGER_ATTRS.union(QS_ATTRS), parents)
def is_admin_attribute(node):
"""Checks that node is attribute of BaseModelAdmin."""
parents = ('django.contrib.admin.options.BaseModelAdmin',
'.BaseModelAdmin')
return _attribute_is_magic(node, MODELADMIN_ATTRS, parents)
def is_model_attribute(node):
"""Checks that node is attribute of Model."""
parents = ('django.db.models.base.Model',
'.Model')
return _attribute_is_magic(node, MODEL_ATTRS, parents)
def is_field_attribute(node):
"""Checks that node is attribute of Field."""
parents = ('django.db.models.fields.Field',
'.Field')
return _attribute_is_magic(node, FIELD_ATTRS, parents)
def is_charfield_attribute(node):
"""Checks that node is attribute of CharField."""
parents = ('django.db.models.fields.CharField',
'.CharField')
return _attribute_is_magic(node, CHAR_FIELD_ATTRS, parents)
def is_datefield_attribute(node):
"""Checks that node is attribute of DateField."""
parents = ('django.db.models.fields.DateField',
'.DateField')
return _attribute_is_magic(node, DATE_FIELD_ATTRS, parents)
def is_decimalfield_attribute(node):
"""Checks that node is attribute of DecimalField."""
parents = ('django.db.models.fields.DecimalField',
'.DecimalField')
return _attribute_is_magic(node, DECIMAL_FIELD_ATTRS, parents)
def is_filefield_attribute(node):
"""Checks that node is attribute of FileField."""
parents = ('django.db.models.fields.files.FileField',
'.FileField')
return _attribute_is_magic(node, FILE_FIELD_ATTRS, parents)
def is_imagefield_attribute(node):
"""Checks that node is attribute of ImageField."""
parents = ('django.db.models.fields.files.ImageField',
'.ImageField')
return _attribute_is_magic(node, IMAGE_FIELD_ATTRS, parents)
def is_ipfield_attribute(node):
"""Checks that node is attribute of GenericIPAddressField."""
parents = ('django.db.models.fields.GenericIPAddressField',
'.GenericIPAddressField')
return _attribute_is_magic(node, IP_FIELD_ATTRS, parents)
def is_slugfield_attribute(node):
"""Checks that node is attribute of SlugField."""
parents = ('django.db.models.fields.SlugField',
'.SlugField')
return _attribute_is_magic(node, SLUG_FIELD_ATTRS, parents)
def is_foreignkeyfield_attribute(node):
"""Checks that node is attribute of ForeignKey."""
parents = ('django.db.models.fields.related.ForeignKey',
'.ForeignKey')
return _attribute_is_magic(node, FOREIGNKEY_FIELD_ATTRS, parents)
def is_manytomanyfield_attribute(node):
"""Checks that node is attribute of ManyToManyField."""
parents = ('django.db.models.fields.related.ManyToManyField',
'.ManyToManyField')
return _attribute_is_magic(node, MANYTOMANY_FIELD_ATTRS, parents)
def is_onetoonefield_attribute(node):
"""Checks that node is attribute of OneToOneField."""
parents = ('django.db.models.fields.related.OneToOneField',
'.OneToOneField')
return _attribute_is_magic(node, ONETOONE_FIELD_ATTRS, parents)
def is_form_attribute(node):
"""Checks that node is attribute of Form."""
parents = ('django.forms.forms.Form', 'django.forms.models.ModelForm')
return _attribute_is_magic(node, FORM_ATTRS, parents)
def is_model_test_case_subclass(node):
"""Checks that node is derivative of TestCase class."""
if not node.name.endswith('Test') and not isinstance(node.parent, ClassDef):
return False
return node_is_subclass(node, 'django.test.testcases.TestCase')
def generic_is_view_attribute(parents, attrs):
"""Generates is_X_attribute function for given parents and attrs."""
def is_attribute(node):
return _attribute_is_magic(node, attrs, parents)
return is_attribute
def is_model_view_subclass_method_shouldnt_be_function(node):
"""Checks that node is get or post method of the View class."""
if node.name not in ('get', 'post'):
return False
parent = node.parent
while parent and not isinstance(parent, ScopedClass):
parent = parent.parent
subclass = ('django.views.View',
'django.views.generic.View',
'django.views.generic.base.View',)
return parent is not None and node_is_subclass(parent, *subclass)
def is_model_view_subclass_unused_argument(node):
"""
Checks that node is get or post method of the View class and it has valid arguments.
TODO: Bad checkings, need to be more smart.
"""
if not is_model_view_subclass_method_shouldnt_be_function(node):
return False
return is_argument_named_request(node)
def is_argument_named_request(node):
"""
If an unused-argument is named 'request' ignore that!
"""
return 'request' in node.argnames()
def is_model_field_display_method(node):
"""Accept model's fields with get_*_display names."""
if not node.attrname.endswith('_display'):
return False
if not node.attrname.startswith('get_'):
return False
if node.last_child():
# TODO: could validate the names of the fields on the model rather than
# blindly accepting get_*_display
try:
for cls in node.last_child().inferred():
if node_is_subclass(cls, 'django.db.models.base.Model', '.Model'):
return True
except InferenceError:
return False
return False
def is_templatetags_module_valid_constant(node):
"""Suppress warnings for valid constants in templatetags module."""
if node.name not in ('register', ):
return False
parent = node.parent
while not isinstance(parent, Module):
parent = parent.parent
if "templatetags." not in parent.name:
return False
return True
def is_urls_module_valid_constant(node):
"""Suppress warnings for valid constants in urls module."""
if node.name not in ('urlpatterns', 'app_name'):
return False
parent = node.parent
while not isinstance(parent, Module):
parent = parent.parent
if not parent.name.endswith('urls'):
return False
return True
def allow_meta_protected_access(node):
if django_version >= (1, 8):
return node.attrname == '_meta'
return False
def is_class(class_name):
"""Shortcut for node_is_subclass."""
return lambda node: node_is_subclass(node, class_name)
def wrap(orig_method, with_method):
def wrap_func(*args, **kwargs):
with_method(orig_method, *args, **kwargs)
return wrap_func
def is_wsgi_application(node):
frame = node.frame()
return node.name == 'application' and isinstance(frame, Module) and \
(frame.name == 'wsgi' or frame.path[0].endswith('wsgi.py') or frame.file.endswith('wsgi.py'))
# Compat helpers
def pylint_newstyle_classdef_compat(linter, warning_name, augment):
if not hasattr(NewStyleConflictChecker, 'visit_classdef'):
return
suppress_message(linter, getattr(NewStyleConflictChecker, 'visit_classdef'), warning_name, augment)
# augment things
def apply_augmentations(linter):
"""Apply augmentation and suppression rules."""
augment_visit(linter, TypeChecker.visit_attribute, foreign_key_sets)
augment_visit(linter, TypeChecker.visit_attribute, foreign_key_ids)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_field_display_method)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_style_attribute)
suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_urls_module_valid_constant)
# supress errors when accessing magical class attributes
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manager_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_admin_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_field_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_charfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_datefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_decimalfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_filefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_imagefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_ipfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_slugfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_foreignkeyfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manytomanyfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_onetoonefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_form_attribute)
for parents, attrs in VIEW_ATTRS:
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', generic_is_view_attribute(parents, attrs))
# formviews have too many ancestors, there's nothing the user of the library can do about that
suppress_message(linter, MisdesignChecker.visit_classdef, 'too-many-ancestors',
is_class('django.views.generic.edit.FormView'))
# model forms have no __init__ method anywhere in their bases
suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_class('django.forms.models.ModelForm'))
# Meta
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_meta_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_meta_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_meta_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_meta_subclass)
suppress_message(linter, ClassChecker.visit_attribute, 'protected-access', allow_meta_protected_access)
# Media
suppress_message(linter, NameChecker.visit_assignname, 'C0103', is_model_media_valid_attributes)
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_media_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_media_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_media_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_media_subclass)
# Admin
# Too many public methods (40+/20)
# TODO: Count public methods of django.contrib.admin.options.ModelAdmin and increase
# MisdesignChecker.config.max_public_methods to this value to count only user' methods.
# nb_public_methods = 0
# for method in node.methods():
# if not method.name.startswith('_'):
# nb_public_methods += 1
suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_admin_subclass)
# Tests
suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_test_case_subclass)
# View
# Method could be a function (get, post)
suppress_message(linter, ClassChecker.leave_functiondef, 'no-self-use',
is_model_view_subclass_method_shouldnt_be_function)
# Unused argument 'request' (get, post)
suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument',
is_model_view_subclass_unused_argument)
suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument', is_argument_named_request)
# django-mptt
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_mpttmeta_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_mpttmeta_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_model_mpttmeta_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_mpttmeta_subclass)
# factory_boy's DjangoModelFactory
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_factory)
suppress_message(linter, ClassChecker.visit_functiondef, 'no-self-argument', is_factory_post_generation_method)
# ForeignKey and OneToOneField
# Must update this in a thread safe way to support the parallel option on pylint (-j)
current_leave_module = VariablesChecker.leave_module
if current_leave_module.__name__ == 'leave_module':
# current_leave_module is not wrapped
# Two threads may hit the next assignment concurrently, but the result is the same
VariablesChecker.leave_module = wrap(current_leave_module, ignore_import_warnings_for_related_fields)
# VariablesChecker.leave_module is now wrapped
# else VariablesChecker.leave_module is already wrapped
# wsgi.py
suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_wsgi_application)
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
is_templatetags_module_valid_constant
|
python
|
def is_templatetags_module_valid_constant(node):
if node.name not in ('register', ):
return False
parent = node.parent
while not isinstance(parent, Module):
parent = parent.parent
if "templatetags." not in parent.name:
return False
return True
|
Suppress warnings for valid constants in templatetags module.
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L697-L709
| null |
"""Augmentations."""
# pylint: disable=invalid-name
import itertools
from astroid import InferenceError
from astroid.objects import Super
from astroid.nodes import ClassDef, ImportFrom, Attribute
from astroid.scoped_nodes import ClassDef as ScopedClass, Module
from pylint.checkers.base import DocStringChecker, NameChecker
from pylint.checkers.design_analysis import MisdesignChecker
from pylint.checkers.classes import ClassChecker
from pylint.checkers.newstyle import NewStyleConflictChecker
from pylint.checkers.variables import VariablesChecker
from pylint.checkers.typecheck import TypeChecker
from pylint.checkers.variables import ScopeConsumer
from pylint_plugin_utils import augment_visit, suppress_message
from django import VERSION as django_version
from django.views.generic.base import View, RedirectView, ContextMixin
from django.views.generic.dates import DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin
from django.views.generic.detail import SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin
from django.views.generic.edit import DeletionMixin, FormMixin, ModelFormMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
from django.utils import termcolors
from pylint_django.utils import node_is_subclass, PY3
# Note: it would have been nice to import the Manager object from Django and
# get its attributes that way - and this used to be the method - but unfortunately
# there's no guarantee that Django is properly configured at that stage, and importing
# anything from the django.db package causes an ImproperlyConfigured exception.
# Therefore we'll fall back on a hard-coded list of attributes which won't be as accurate,
# but this is not 100% accurate anyway.
MANAGER_ATTRS = {
'none',
'all',
'count',
'dates',
'distinct',
'extra',
'get',
'get_or_create',
'get_queryset',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'latest',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
}
QS_ATTRS = {
'filter',
'exclude',
'annotate',
'order_by',
'reverse',
'distinct',
'values',
'values_list',
'dates',
'datetimes',
'none',
'all',
'select_related',
'prefetch_related',
'extra',
'defer',
'only',
'using',
'select_for_update',
'raw',
'get',
'create',
'get_or_create',
'update_or_create',
'bulk_create',
'count',
'in_bulk',
'iterator',
'latest',
'earliest',
'first',
'last',
'aggregate',
'exists',
'update',
'delete',
'as_manager',
'expression',
'output_field',
}
MODELADMIN_ATTRS = {
# options
'actions',
'actions_on_top',
'actions_on_bottom',
'actions_selection_counter',
'date_hierarchy',
'empty_value_display',
'exclude',
'fields',
'fieldsets',
'filter_horizontal',
'filter_vertical',
'form',
'formfield_overrides',
'inlines',
'list_display',
'list_display_links',
'list_editable',
'list_filter',
'list_max_show_all',
'list_per_page',
'list_select_related',
'ordering',
'paginator',
'prepopulated_fields',
'preserve_filters',
'radio_fields',
'raw_id_fields',
'readonly_fields',
'save_as',
'save_on_top',
'search_fields',
'show_full_result_count',
'view_on_site',
# template options
'add_form_template',
'change_form_template',
'change_list_template',
'delete_confirmation_template',
'delete_selected_confirmation_template',
'object_history_template',
}
MODEL_ATTRS = {
'id',
'DoesNotExist',
'MultipleObjectsReturned',
'_base_manager',
'_default_manager',
'_meta',
'delete',
'get_next_by_date',
'get_previous_by_date',
'objects',
'save',
}
FIELD_ATTRS = {
'null',
'blank',
'choices',
'db_column',
'db_index',
'db_tablespace',
'default',
'editable',
'error_messages',
'help_text',
'primary_key',
'unique',
'unique_for_date',
'unique_for_month',
'unique_for_year',
'verbose_name',
'validators',
}
CHAR_FIELD_ATTRS = {
'max_length',
}
DATE_FIELD_ATTRS = {
'auto_now',
'auto_now_add',
}
DECIMAL_FIELD_ATTRS = {
'max_digits',
'decimal_places',
}
FILE_FIELD_ATTRS = {
'upload_to',
'storage',
}
IMAGE_FIELD_ATTRS = {
'height_field',
'width_field',
}
IP_FIELD_ATTRS = {
'protocol',
'unpack_ipv4',
}
SLUG_FIELD_ATTRS = {
'allow_unicode',
}
FOREIGNKEY_FIELD_ATTRS = {
'limit_choices_to',
'related_name',
'related_query_name',
'to_field',
'db_constraint',
'swappable',
}
MANYTOMANY_FIELD_ATTRS = {
'add',
'clear',
'related_name',
'related_query_name',
'limit_choices_to',
'symmetrical',
'through',
'through_fields',
'db_table',
'db_constraint',
'swappable',
}
ONETOONE_FIELD_ATTRS = {
'parent_link',
}
STYLE_ATTRS = set(itertools.chain.from_iterable(termcolors.PALETTES.values()))
VIEW_ATTRS = {
(
(
'{}.{}'.format(cls.__module__, cls.__name__),
'.{}'.format(cls.__name__)
),
tuple(cls.__dict__.keys())
) for cls in (
View, RedirectView, ContextMixin,
DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin,
SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin,
DeletionMixin, FormMixin, ModelFormMixin,
MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)
}
FORM_ATTRS = {
'declared_fields',
}
def ignore_import_warnings_for_related_fields(orig_method, self, node):
"""
Replaces the leave_module method on the VariablesChecker class to
prevent unused-import warnings which are caused by the ForeignKey
and OneToOneField transformations. By replacing the nodes in the
AST with their type rather than the django field, imports of the
form 'from django.db.models import OneToOneField' raise an unused-import
warning
"""
consumer = self._to_consume[0] # pylint: disable=W0212
# we can disable this warning ('Access to a protected member _to_consume of a client class')
# as it's not actually a client class, but rather, this method is being monkey patched
# onto the class and so the access is valid
new_things = {}
iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems
for name, stmts in iterat():
if isinstance(stmts[0], ImportFrom):
if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
continue
new_things[name] = stmts
consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type) # pylint: disable=W0212
self._to_consume = [consumer] # pylint: disable=W0212
return orig_method(self, node)
def foreign_key_sets(chain, node):
    """
    When a Django model has a ForeignKey to another model, the target
    of the foreign key gets a '<modelname>_set' attribute for accessing
    a queryset of the model owning the foreign key - eg:

    class ModelA(models.Model):
        pass

    class ModelB(models.Model):
        a = models.ForeignKey(ModelA)

    Now, ModelA instances will have a modelb_set attribute.

    It's also possible to explicitly name the relationship using the related_name argument
    to the ForeignKey constructor. As it's impossible to know this without inspecting all
    models before processing, we'll instead do a "best guess" approach and see if the attribute
    being accessed goes on to be used as a queryset. This is via 'duck typing': if the method
    called on the attribute being accessed is something we might find in a queryset, we'll
    warn.
    """
    quack = False

    if node.attrname in MANAGER_ATTRS or node.attrname.endswith('_set'):
        # if this is a X_set method, that's a pretty strong signal that this is the default
        # Django name, rather than one set by related_name
        quack = True
    elif isinstance(node.parent, Attribute):
        # otherwise, check whether a queryset-ish method is called on the attribute
        func_name = getattr(node.parent, 'attrname', None)
        if func_name in MANAGER_ATTRS:
            quack = True

    if quack:
        # perf: iterate the child generator directly instead of materialising
        # it into a list we only walk once
        for child in node.get_children():
            try:
                inferred_cls = child.inferred()
            except InferenceError:
                pass
            else:
                for cls in inferred_cls:
                    if (node_is_subclass(cls,
                                         'django.db.models.manager.Manager',
                                         'django.db.models.base.Model',
                                         '.Model',
                                         'django.db.models.fields.related.ForeignObject')):
                        # This means that we are looking at a subclass of models.Model
                        # and something is trying to access a <something>_set attribute.
                        # Since this could exist, we will return so as not to raise an
                        # error.
                        return
    chain()
def foreign_key_ids(chain, node):
    """Suppress the check for the implicit ``<fk-name>_id`` attribute.

    Django adds an ``_id`` attribute for every foreign key, so any
    attribute access ending in ``_id`` is allowed through silently;
    anything else continues down the checker chain.
    """
    if not node.attrname.endswith('_id'):
        chain()
def is_model_admin_subclass(node):
    """Checks that node is derivative of ModelAdmin class."""
    # Only top-level classes whose name ends in 'Admin' are considered.
    if not node.name.endswith('Admin') or isinstance(node.parent, ClassDef):
        return False
    return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin')
def is_model_media_subclass(node):
    """Checks that node is derivative of Media class."""
    if node.name != 'Media' or not isinstance(node.parent, ClassDef):
        return False
    # Media is only magic when nested inside one of these owner classes.
    allowed_owners = (
        'django.contrib.admin.options.ModelAdmin',
        'django.forms.widgets.Media',
        'django.db.models.base.Model',
        '.Model',  # for the transformed version used in this plugin
        'django.forms.forms.Form',
        '.Form',
        'django.forms.widgets.Widget',
        '.Widget',
        'django.forms.models.ModelForm',
        '.ModelForm',
    )
    return node_is_subclass(node.parent, *allowed_owners)
def is_model_meta_subclass(node):
    """Checks that node is derivative of Meta class."""
    if node.name != 'Meta' or not isinstance(node.parent, ClassDef):
        return False
    # Meta is only magic when nested inside one of these owner classes.
    allowed_owners = (
        '.Model',  # for the transformed version used here
        'django.db.models.base.Model',
        '.Form',
        'django.forms.forms.Form',
        '.ModelForm',
        'django.forms.models.ModelForm',
        'rest_framework.serializers.BaseSerializer',
        'rest_framework.generics.GenericAPIView',
        'rest_framework.viewsets.ReadOnlyModelViewSet',
        'rest_framework.viewsets.ModelViewSet',
        'django_filters.filterset.FilterSet',
        'factory.django.DjangoModelFactory',
    )
    return node_is_subclass(node.parent, *allowed_owners)
def is_model_factory(node):
    """Checks that node is derivative of DjangoModelFactory or SubFactory class."""
    factory_bases = ('factory.declarations.LazyFunction',
                     'factory.declarations.SubFactory',
                     'factory.django.DjangoModelFactory')
    try:
        candidates = node.expr.inferred()
    except:  # noqa: E722, pylint: disable=bare-except
        # inference can fail in many ways; treat any failure as "not a factory"
        return False
    for candidate in candidates:
        try:
            if (candidate.qname() in factory_bases
                    or node_is_subclass(candidate, *factory_bases)):
                return True
        except AttributeError:
            continue
    return False
def is_factory_post_generation_method(node):
    """Checks whether node is decorated with ``factory.post_generation``."""
    decorators = node.decorators
    if not decorators:
        return False
    for dec in decorators.get_children():
        try:
            candidates = dec.inferred()
        except InferenceError:
            continue
        if any(target.qname() == 'factory.helpers.post_generation'
               for target in candidates):
            return True
    return False
def is_model_mpttmeta_subclass(node):
    """Checks that node is derivative of MPTTMeta class."""
    if node.name != 'MPTTMeta' or not isinstance(node.parent, ClassDef):
        return False
    # MPTTMeta is only magic when nested inside one of these owner classes.
    allowed_owners = (
        'django.db.models.base.Model',
        '.Model',  # for the transformed version used in this plugin
        'django.forms.forms.Form',
        '.Form',
        'django.forms.models.ModelForm',
        '.ModelForm',
    )
    return node_is_subclass(node.parent, *allowed_owners)
def _attribute_is_magic(node, attrs, parents):
    """
    Checks that node is an attribute used inside one of allowed parents.

    :param node: the Attribute node being inspected
    :param attrs: set of attribute names considered magic
    :param parents: qualified class names on which the attrs are allowed
    """
    if node.attrname not in attrs:
        return False
    if not node.last_child():
        return False
    try:
        for cls in node.last_child().inferred():
            if isinstance(cls, Super):
                # unwrap super() proxies to the class they delegate to
                cls = cls._self_class  # pylint: disable=protected-access
            if node_is_subclass(cls, *parents) or cls.qname() in parents:
                return True
    except InferenceError:
        # inference failed - conservatively report "not magic"
        pass
    return False
def is_style_attribute(node):
    """Checks that node is an attribute of a termcolors Style object."""
    return _attribute_is_magic(
        node, STYLE_ATTRS, ('django.core.management.color.Style',))


def is_manager_attribute(node):
    """Checks that node is attribute of Manager or QuerySet class."""
    owners = ('django.db.models.manager.Manager',
              '.Manager',
              'factory.base.BaseFactory.build',
              'django.db.models.query.QuerySet',
              '.QuerySet')
    return _attribute_is_magic(node, MANAGER_ATTRS.union(QS_ATTRS), owners)


def is_admin_attribute(node):
    """Checks that node is attribute of BaseModelAdmin."""
    return _attribute_is_magic(
        node, MODELADMIN_ATTRS,
        ('django.contrib.admin.options.BaseModelAdmin', '.BaseModelAdmin'))


def is_model_attribute(node):
    """Checks that node is attribute of Model."""
    return _attribute_is_magic(
        node, MODEL_ATTRS, ('django.db.models.base.Model', '.Model'))


def is_field_attribute(node):
    """Checks that node is attribute of Field."""
    return _attribute_is_magic(
        node, FIELD_ATTRS, ('django.db.models.fields.Field', '.Field'))


def is_charfield_attribute(node):
    """Checks that node is attribute of CharField."""
    return _attribute_is_magic(
        node, CHAR_FIELD_ATTRS,
        ('django.db.models.fields.CharField', '.CharField'))


def is_datefield_attribute(node):
    """Checks that node is attribute of DateField."""
    return _attribute_is_magic(
        node, DATE_FIELD_ATTRS,
        ('django.db.models.fields.DateField', '.DateField'))


def is_decimalfield_attribute(node):
    """Checks that node is attribute of DecimalField."""
    return _attribute_is_magic(
        node, DECIMAL_FIELD_ATTRS,
        ('django.db.models.fields.DecimalField', '.DecimalField'))


def is_filefield_attribute(node):
    """Checks that node is attribute of FileField."""
    return _attribute_is_magic(
        node, FILE_FIELD_ATTRS,
        ('django.db.models.fields.files.FileField', '.FileField'))


def is_imagefield_attribute(node):
    """Checks that node is attribute of ImageField."""
    return _attribute_is_magic(
        node, IMAGE_FIELD_ATTRS,
        ('django.db.models.fields.files.ImageField', '.ImageField'))


def is_ipfield_attribute(node):
    """Checks that node is attribute of GenericIPAddressField."""
    return _attribute_is_magic(
        node, IP_FIELD_ATTRS,
        ('django.db.models.fields.GenericIPAddressField', '.GenericIPAddressField'))


def is_slugfield_attribute(node):
    """Checks that node is attribute of SlugField."""
    return _attribute_is_magic(
        node, SLUG_FIELD_ATTRS,
        ('django.db.models.fields.SlugField', '.SlugField'))


def is_foreignkeyfield_attribute(node):
    """Checks that node is attribute of ForeignKey."""
    return _attribute_is_magic(
        node, FOREIGNKEY_FIELD_ATTRS,
        ('django.db.models.fields.related.ForeignKey', '.ForeignKey'))


def is_manytomanyfield_attribute(node):
    """Checks that node is attribute of ManyToManyField."""
    return _attribute_is_magic(
        node, MANYTOMANY_FIELD_ATTRS,
        ('django.db.models.fields.related.ManyToManyField', '.ManyToManyField'))


def is_onetoonefield_attribute(node):
    """Checks that node is attribute of OneToOneField."""
    return _attribute_is_magic(
        node, ONETOONE_FIELD_ATTRS,
        ('django.db.models.fields.related.OneToOneField', '.OneToOneField'))


def is_form_attribute(node):
    """Checks that node is attribute of Form."""
    return _attribute_is_magic(
        node, FORM_ATTRS,
        ('django.forms.forms.Form', 'django.forms.models.ModelForm'))
def is_model_test_case_subclass(node):
    """Checks that node is derivative of TestCase class."""
    # NOTE(review): this guard uses `and`, unlike the sibling
    # is_model_admin_subclass which uses `or`, so any top-level class still
    # falls through to the subclass check regardless of its name.
    # Presumably intentional (suppress for every TestCase subclass) -- confirm.
    if not node.name.endswith('Test') and not isinstance(node.parent, ClassDef):
        return False
    return node_is_subclass(node, 'django.test.testcases.TestCase')
def generic_is_view_attribute(parents, attrs):
    """Generates is_X_attribute function for given parents and attrs."""
    def checker(node):
        # closure over the configured owners/attribute names
        return _attribute_is_magic(node, attrs, parents)
    return checker
def is_model_view_subclass_method_shouldnt_be_function(node):
    """Checks that node is get or post method of the View class."""
    if node.name not in ('get', 'post'):
        return False
    # walk outwards to the enclosing class definition, if any
    owner = node.parent
    while owner and not isinstance(owner, ScopedClass):
        owner = owner.parent
    view_bases = ('django.views.View',
                  'django.views.generic.View',
                  'django.views.generic.base.View')
    return owner is not None and node_is_subclass(owner, *view_bases)
def is_model_view_subclass_unused_argument(node):
    """
    Checks that node is get or post method of the View class and it has valid arguments.

    TODO: Bad checkings, need to be more smart.
    """
    return (is_model_view_subclass_method_shouldnt_be_function(node)
            and is_argument_named_request(node))


def is_argument_named_request(node):
    """If an unused-argument is named 'request' ignore that!"""
    return 'request' in node.argnames()
def is_model_field_display_method(node):
    """Accept model's fields with get_*_display names."""
    name = node.attrname
    if not (name.startswith('get_') and name.endswith('_display')):
        return False
    child = node.last_child()
    if child:
        # TODO: could validate the names of the fields on the model rather than
        # blindly accepting get_*_display
        try:
            for cls in child.inferred():
                if node_is_subclass(cls, 'django.db.models.base.Model', '.Model'):
                    return True
        except InferenceError:
            return False
    return False
def is_model_media_valid_attributes(node):
    """Suppress warnings for valid attributes of Media class."""
    if node.name not in ('js', ):
        return False
    # climb to the enclosing class and make sure it is the nested Media class
    owner = node.parent
    while owner and not isinstance(owner, ScopedClass):
        owner = owner.parent
    return owner is not None and owner.name == "Media"
def is_urls_module_valid_constant(node):
    """Suppress warnings for valid constants in urls module."""
    if node.name not in ('urlpatterns', 'app_name'):
        return False
    # climb to the containing module and check it looks like a urls module
    scope = node.parent
    while not isinstance(scope, Module):
        scope = scope.parent
    return scope.name.endswith('urls')
def allow_meta_protected_access(node):
    """Accesses to Django's documented ``_meta`` API are not 'protected'."""
    # _meta became a public, documented API in Django 1.8
    return node.attrname == '_meta' if django_version >= (1, 8) else False
def is_class(class_name):
    """Shortcut for node_is_subclass."""
    def predicate(node):
        return node_is_subclass(node, class_name)
    return predicate
def wrap(orig_method, with_method):
    """
    Return a function that calls ``with_method`` with ``orig_method``
    prepended to the arguments.

    Used to monkey-patch checker methods while keeping the original
    implementation available to the wrapper.
    """
    def wrap_func(*args, **kwargs):
        # bug fix: the wrapper previously discarded the wrapped call's return
        # value; propagate it so patched methods keep the original contract
        return with_method(orig_method, *args, **kwargs)
    return wrap_func
def is_wsgi_application(node):
    """Checks that node is the module-level ``application`` object of a wsgi module."""
    frame = node.frame()
    if node.name != 'application' or not isinstance(frame, Module):
        return False
    # accept either the conventional module name or a wsgi.py file path
    return (frame.name == 'wsgi'
            or frame.path[0].endswith('wsgi.py')
            or frame.file.endswith('wsgi.py'))
# Compat helpers
def pylint_newstyle_classdef_compat(linter, warning_name, augment):
if not hasattr(NewStyleConflictChecker, 'visit_classdef'):
return
suppress_message(linter, getattr(NewStyleConflictChecker, 'visit_classdef'), warning_name, augment)
# augment things
def apply_augmentations(linter):
    """Apply augmentation and suppression rules.

    Registers every augmentation/suppression predicate defined in this
    module with the given ``linter``; called from the plugin's
    register hook.
    """
    augment_visit(linter, TypeChecker.visit_attribute, foreign_key_sets)
    augment_visit(linter, TypeChecker.visit_attribute, foreign_key_ids)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_field_display_method)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_style_attribute)
    suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_urls_module_valid_constant)
    # supress errors when accessing magical class attributes
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manager_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_admin_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_field_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_charfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_datefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_decimalfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_filefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_imagefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_ipfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_slugfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_foreignkeyfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manytomanyfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_onetoonefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_form_attribute)
    # one generated predicate per generic class-based-view class/mixin
    for parents, attrs in VIEW_ATTRS:
        suppress_message(linter, TypeChecker.visit_attribute, 'no-member', generic_is_view_attribute(parents, attrs))
    # formviews have too many ancestors, there's nothing the user of the library can do about that
    suppress_message(linter, MisdesignChecker.visit_classdef, 'too-many-ancestors',
                     is_class('django.views.generic.edit.FormView'))
    # model forms have no __init__ method anywhere in their bases
    suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_class('django.forms.models.ModelForm'))
    # Meta
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_meta_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_meta_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_meta_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_meta_subclass)
    suppress_message(linter, ClassChecker.visit_attribute, 'protected-access', allow_meta_protected_access)
    # Media
    suppress_message(linter, NameChecker.visit_assignname, 'C0103', is_model_media_valid_attributes)
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_media_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_media_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_media_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_media_subclass)
    # Admin
    # Too many public methods (40+/20)
    # TODO: Count public methods of django.contrib.admin.options.ModelAdmin and increase
    # MisdesignChecker.config.max_public_methods to this value to count only user' methods.
    # nb_public_methods = 0
    # for method in node.methods():
    #     if not method.name.startswith('_'):
    #         nb_public_methods += 1
    suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_admin_subclass)
    # Tests
    suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_test_case_subclass)
    # View
    # Method could be a function (get, post)
    suppress_message(linter, ClassChecker.leave_functiondef, 'no-self-use',
                     is_model_view_subclass_method_shouldnt_be_function)
    # Unused argument 'request' (get, post)
    suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument',
                     is_model_view_subclass_unused_argument)
    suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument', is_argument_named_request)
    # django-mptt
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_mpttmeta_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_mpttmeta_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_model_mpttmeta_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_mpttmeta_subclass)
    # factory_boy's DjangoModelFactory
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_factory)
    suppress_message(linter, ClassChecker.visit_functiondef, 'no-self-argument', is_factory_post_generation_method)
    # ForeignKey and OneToOneField
    # Must update this in a thread safe way to support the parallel option on pylint (-j)
    current_leave_module = VariablesChecker.leave_module
    if current_leave_module.__name__ == 'leave_module':
        # current_leave_module is not wrapped
        # Two threads may hit the next assignment concurrently, but the result is the same
        VariablesChecker.leave_module = wrap(current_leave_module, ignore_import_warnings_for_related_fields)
        # VariablesChecker.leave_module is now wrapped
    # else VariablesChecker.leave_module is already wrapped
    # wsgi.py
    suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_wsgi_application)
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
is_urls_module_valid_constant
|
python
|
def is_urls_module_valid_constant(node):
if node.name not in ('urlpatterns', 'app_name'):
return False
parent = node.parent
while not isinstance(parent, Module):
parent = parent.parent
if not parent.name.endswith('urls'):
return False
return True
|
Suppress warnings for valid constants in urls module.
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L712-L724
| null |
"""Augmentations."""
# pylint: disable=invalid-name
import itertools
from astroid import InferenceError
from astroid.objects import Super
from astroid.nodes import ClassDef, ImportFrom, Attribute
from astroid.scoped_nodes import ClassDef as ScopedClass, Module
from pylint.checkers.base import DocStringChecker, NameChecker
from pylint.checkers.design_analysis import MisdesignChecker
from pylint.checkers.classes import ClassChecker
from pylint.checkers.newstyle import NewStyleConflictChecker
from pylint.checkers.variables import VariablesChecker
from pylint.checkers.typecheck import TypeChecker
from pylint.checkers.variables import ScopeConsumer
from pylint_plugin_utils import augment_visit, suppress_message
from django import VERSION as django_version
from django.views.generic.base import View, RedirectView, ContextMixin
from django.views.generic.dates import DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin
from django.views.generic.detail import SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin
from django.views.generic.edit import DeletionMixin, FormMixin, ModelFormMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
from django.utils import termcolors
from pylint_django.utils import node_is_subclass, PY3
# Note: it would have been nice to import the Manager object from Django and
# get its attributes that way - and this used to be the method - but unfortunately
# there's no guarantee that Django is properly configured at that stage, and importing
# anything from the django.db package causes an ImproperlyConfigured exception.
# Therefore we'll fall back on a hard-coded list of attributes which won't be as accurate,
# but this is not 100% accurate anyway.
MANAGER_ATTRS = {
'none',
'all',
'count',
'dates',
'distinct',
'extra',
'get',
'get_or_create',
'get_queryset',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'latest',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
}
QS_ATTRS = {
'filter',
'exclude',
'annotate',
'order_by',
'reverse',
'distinct',
'values',
'values_list',
'dates',
'datetimes',
'none',
'all',
'select_related',
'prefetch_related',
'extra',
'defer',
'only',
'using',
'select_for_update',
'raw',
'get',
'create',
'get_or_create',
'update_or_create',
'bulk_create',
'count',
'in_bulk',
'iterator',
'latest',
'earliest',
'first',
'last',
'aggregate',
'exists',
'update',
'delete',
'as_manager',
'expression',
'output_field',
}
MODELADMIN_ATTRS = {
# options
'actions',
'actions_on_top',
'actions_on_bottom',
'actions_selection_counter',
'date_hierarchy',
'empty_value_display',
'exclude',
'fields',
'fieldsets',
'filter_horizontal',
'filter_vertical',
'form',
'formfield_overrides',
'inlines',
'list_display',
'list_display_links',
'list_editable',
'list_filter',
'list_max_show_all',
'list_per_page',
'list_select_related',
'ordering',
'paginator',
'prepopulated_fields',
'preserve_filters',
'radio_fields',
'raw_id_fields',
'readonly_fields',
'save_as',
'save_on_top',
'search_fields',
'show_full_result_count',
'view_on_site',
# template options
'add_form_template',
'change_form_template',
'change_list_template',
'delete_confirmation_template',
'delete_selected_confirmation_template',
'object_history_template',
}
MODEL_ATTRS = {
'id',
'DoesNotExist',
'MultipleObjectsReturned',
'_base_manager',
'_default_manager',
'_meta',
'delete',
'get_next_by_date',
'get_previous_by_date',
'objects',
'save',
}
FIELD_ATTRS = {
'null',
'blank',
'choices',
'db_column',
'db_index',
'db_tablespace',
'default',
'editable',
'error_messages',
'help_text',
'primary_key',
'unique',
'unique_for_date',
'unique_for_month',
'unique_for_year',
'verbose_name',
'validators',
}
CHAR_FIELD_ATTRS = {
'max_length',
}
DATE_FIELD_ATTRS = {
'auto_now',
'auto_now_add',
}
DECIMAL_FIELD_ATTRS = {
'max_digits',
'decimal_places',
}
FILE_FIELD_ATTRS = {
'upload_to',
'storage',
}
IMAGE_FIELD_ATTRS = {
'height_field',
'width_field',
}
IP_FIELD_ATTRS = {
'protocol',
'unpack_ipv4',
}
SLUG_FIELD_ATTRS = {
'allow_unicode',
}
FOREIGNKEY_FIELD_ATTRS = {
'limit_choices_to',
'related_name',
'related_query_name',
'to_field',
'db_constraint',
'swappable',
}
MANYTOMANY_FIELD_ATTRS = {
'add',
'clear',
'related_name',
'related_query_name',
'limit_choices_to',
'symmetrical',
'through',
'through_fields',
'db_table',
'db_constraint',
'swappable',
}
ONETOONE_FIELD_ATTRS = {
'parent_link',
}
STYLE_ATTRS = set(itertools.chain.from_iterable(termcolors.PALETTES.values()))
VIEW_ATTRS = {
(
(
'{}.{}'.format(cls.__module__, cls.__name__),
'.{}'.format(cls.__name__)
),
tuple(cls.__dict__.keys())
) for cls in (
View, RedirectView, ContextMixin,
DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin,
SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin,
DeletionMixin, FormMixin, ModelFormMixin,
MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)
}
FORM_ATTRS = {
'declared_fields',
}
def ignore_import_warnings_for_related_fields(orig_method, self, node):
    """
    Replaces the leave_module method on the VariablesChecker class to
    prevent unused-import warnings which are caused by the ForeignKey
    and OneToOneField transformations. By replacing the nodes in the
    AST with their type rather than the django field, imports of the
    form 'from django.db.models import OneToOneField' raise an unused-import
    warning
    """
    consumer = self._to_consume[0]  # pylint: disable=W0212
    # we can disable this warning ('Access to a protected member _to_consume of a client class')
    # as it's not actually a client class, but rather, this method is being monkey patched
    # onto the class and so the access is valid
    new_things = {}
    # dict iteration API differs between Python 2 and 3
    iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems
    for name, stmts in iterat():
        if isinstance(stmts[0], ImportFrom):
            # drop ForeignKey/OneToOneField imports so they are not flagged unused
            if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
                continue
        new_things[name] = stmts
    consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type)  # pylint: disable=W0212
    self._to_consume = [consumer]  # pylint: disable=W0212
    return orig_method(self, node)
def foreign_key_sets(chain, node):
    """
    When a Django model has a ForeignKey to another model, the target
    of the foreign key gets a '<modelname>_set' attribute for accessing
    a queryset of the model owning the foreign key - eg:

    class ModelA(models.Model):
        pass

    class ModelB(models.Model):
        a = models.ForeignKey(ModelA)

    Now, ModelA instances will have a modelb_set attribute.

    It's also possible to explicitly name the relationship using the related_name argument
    to the ForeignKey constructor. As it's impossible to know this without inspecting all
    models before processing, we'll instead do a "best guess" approach and see if the attribute
    being accessed goes on to be used as a queryset. This is via 'duck typing': if the method
    called on the attribute being accessed is something we might find in a queryset, we'll
    warn.
    """
    quack = False
    if node.attrname in MANAGER_ATTRS or node.attrname.endswith('_set'):
        # if this is a X_set method, that's a pretty strong signal that this is the default
        # Django name, rather than one set by related_name
        quack = True
    else:
        # we will
        if isinstance(node.parent, Attribute):
            # duck-typing: does the attribute get a queryset method called on it?
            func_name = getattr(node.parent, 'attrname', None)
            if func_name in MANAGER_ATTRS:
                quack = True
    if quack:
        children = list(node.get_children())
        for child in children:
            try:
                inferred_cls = child.inferred()
            except InferenceError:
                pass
            else:
                for cls in inferred_cls:
                    if (node_is_subclass(cls,
                                         'django.db.models.manager.Manager',
                                         'django.db.models.base.Model',
                                         '.Model',
                                         'django.db.models.fields.related.ForeignObject')):
                        # This means that we are looking at a subclass of models.Model
                        # and something is trying to access a <something>_set attribute.
                        # Since this could exist, we will return so as not to raise an
                        # error.
                        return
    chain()


def foreign_key_ids(chain, node):
    """Suppress the check for the implicit '<fk>_id' attribute Django adds."""
    if node.attrname.endswith('_id'):
        return
    chain()
def is_model_admin_subclass(node):
    """Checks that node is derivative of ModelAdmin class."""
    # only top-level classes whose name ends in 'Admin' are considered
    if node.name[-5:] != 'Admin' or isinstance(node.parent, ClassDef):
        return False
    return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin')


def is_model_media_subclass(node):
    """Checks that node is derivative of Media class."""
    if node.name != 'Media' or not isinstance(node.parent, ClassDef):
        return False
    # Media is only magic when nested inside one of these owner classes
    parents = ('django.contrib.admin.options.ModelAdmin',
               'django.forms.widgets.Media',
               'django.db.models.base.Model',
               '.Model',  # for the transformed version used in this plugin
               'django.forms.forms.Form',
               '.Form',
               'django.forms.widgets.Widget',
               '.Widget',
               'django.forms.models.ModelForm',
               '.ModelForm')
    return node_is_subclass(node.parent, *parents)


def is_model_meta_subclass(node):
    """Checks that node is derivative of Meta class."""
    if node.name != 'Meta' or not isinstance(node.parent, ClassDef):
        return False
    # Meta is only magic when nested inside one of these owner classes
    parents = ('.Model',  # for the transformed version used here
               'django.db.models.base.Model',
               '.Form',
               'django.forms.forms.Form',
               '.ModelForm',
               'django.forms.models.ModelForm',
               'rest_framework.serializers.BaseSerializer',
               'rest_framework.generics.GenericAPIView',
               'rest_framework.viewsets.ReadOnlyModelViewSet',
               'rest_framework.viewsets.ModelViewSet',
               'django_filters.filterset.FilterSet',
               'factory.django.DjangoModelFactory',)
    return node_is_subclass(node.parent, *parents)
def is_model_factory(node):
    """Checks that node is derivative of DjangoModelFactory or SubFactory class."""
    try:
        parent_classes = node.expr.inferred()
    except:  # noqa: E722, pylint: disable=bare-except
        # inference can fail in many ways; any failure means "not a factory"
        return False
    parents = ('factory.declarations.LazyFunction',
               'factory.declarations.SubFactory',
               'factory.django.DjangoModelFactory')
    for parent_class in parent_classes:
        try:
            if parent_class.qname() in parents:
                return True
            if node_is_subclass(parent_class, *parents):
                return True
        except AttributeError:
            continue
    return False


def is_factory_post_generation_method(node):
    """Checks that node is decorated with factory.post_generation."""
    if not node.decorators:
        return False
    for decorator in node.decorators.get_children():
        try:
            inferred = decorator.inferred()
        except InferenceError:
            continue
        for target in inferred:
            if target.qname() == 'factory.helpers.post_generation':
                return True
    return False


def is_model_mpttmeta_subclass(node):
    """Checks that node is derivative of MPTTMeta class."""
    if node.name != 'MPTTMeta' or not isinstance(node.parent, ClassDef):
        return False
    # MPTTMeta is only magic when nested inside one of these owner classes
    parents = ('django.db.models.base.Model',
               '.Model',  # for the transformed version used in this plugin
               'django.forms.forms.Form',
               '.Form',
               'django.forms.models.ModelForm',
               '.ModelForm')
    return node_is_subclass(node.parent, *parents)
def _attribute_is_magic(node, attrs, parents):
    """
    Checks that node is an attribute used inside one of allowed parents.

    :param node: the Attribute node being inspected
    :param attrs: set of attribute names considered magic
    :param parents: qualified class names on which the attrs are allowed
    """
    if node.attrname not in attrs:
        return False
    if not node.last_child():
        return False
    try:
        for cls in node.last_child().inferred():
            if isinstance(cls, Super):
                # unwrap super() proxies to the class they delegate to
                cls = cls._self_class  # pylint: disable=protected-access
            if node_is_subclass(cls, *parents) or cls.qname() in parents:
                return True
    except InferenceError:
        # inference failed - conservatively report "not magic"
        pass
    return False
def is_style_attribute(node):
    """Checks that node is an attribute of a termcolors Style object."""
    parents = ('django.core.management.color.Style', )
    return _attribute_is_magic(node, STYLE_ATTRS, parents)


def is_manager_attribute(node):
    """Checks that node is attribute of Manager or QuerySet class."""
    parents = ('django.db.models.manager.Manager',
               '.Manager',
               'factory.base.BaseFactory.build',
               'django.db.models.query.QuerySet',
               '.QuerySet')
    return _attribute_is_magic(node, MANAGER_ATTRS.union(QS_ATTRS), parents)


def is_admin_attribute(node):
    """Checks that node is attribute of BaseModelAdmin."""
    parents = ('django.contrib.admin.options.BaseModelAdmin',
               '.BaseModelAdmin')
    return _attribute_is_magic(node, MODELADMIN_ATTRS, parents)


def is_model_attribute(node):
    """Checks that node is attribute of Model."""
    parents = ('django.db.models.base.Model',
               '.Model')
    return _attribute_is_magic(node, MODEL_ATTRS, parents)


def is_field_attribute(node):
    """Checks that node is attribute of Field."""
    parents = ('django.db.models.fields.Field',
               '.Field')
    return _attribute_is_magic(node, FIELD_ATTRS, parents)


def is_charfield_attribute(node):
    """Checks that node is attribute of CharField."""
    parents = ('django.db.models.fields.CharField',
               '.CharField')
    return _attribute_is_magic(node, CHAR_FIELD_ATTRS, parents)


def is_datefield_attribute(node):
    """Checks that node is attribute of DateField."""
    parents = ('django.db.models.fields.DateField',
               '.DateField')
    return _attribute_is_magic(node, DATE_FIELD_ATTRS, parents)


def is_decimalfield_attribute(node):
    """Checks that node is attribute of DecimalField."""
    parents = ('django.db.models.fields.DecimalField',
               '.DecimalField')
    return _attribute_is_magic(node, DECIMAL_FIELD_ATTRS, parents)


def is_filefield_attribute(node):
    """Checks that node is attribute of FileField."""
    parents = ('django.db.models.fields.files.FileField',
               '.FileField')
    return _attribute_is_magic(node, FILE_FIELD_ATTRS, parents)


def is_imagefield_attribute(node):
    """Checks that node is attribute of ImageField."""
    parents = ('django.db.models.fields.files.ImageField',
               '.ImageField')
    return _attribute_is_magic(node, IMAGE_FIELD_ATTRS, parents)


def is_ipfield_attribute(node):
    """Checks that node is attribute of GenericIPAddressField."""
    parents = ('django.db.models.fields.GenericIPAddressField',
               '.GenericIPAddressField')
    return _attribute_is_magic(node, IP_FIELD_ATTRS, parents)


def is_slugfield_attribute(node):
    """Checks that node is attribute of SlugField."""
    parents = ('django.db.models.fields.SlugField',
               '.SlugField')
    return _attribute_is_magic(node, SLUG_FIELD_ATTRS, parents)


def is_foreignkeyfield_attribute(node):
    """Checks that node is attribute of ForeignKey."""
    parents = ('django.db.models.fields.related.ForeignKey',
               '.ForeignKey')
    return _attribute_is_magic(node, FOREIGNKEY_FIELD_ATTRS, parents)


def is_manytomanyfield_attribute(node):
    """Checks that node is attribute of ManyToManyField."""
    parents = ('django.db.models.fields.related.ManyToManyField',
               '.ManyToManyField')
    return _attribute_is_magic(node, MANYTOMANY_FIELD_ATTRS, parents)


def is_onetoonefield_attribute(node):
    """Checks that node is attribute of OneToOneField."""
    parents = ('django.db.models.fields.related.OneToOneField',
               '.OneToOneField')
    return _attribute_is_magic(node, ONETOONE_FIELD_ATTRS, parents)


def is_form_attribute(node):
    """Checks that node is attribute of Form."""
    parents = ('django.forms.forms.Form', 'django.forms.models.ModelForm')
    return _attribute_is_magic(node, FORM_ATTRS, parents)
def is_model_test_case_subclass(node):
    """Return True when *node* derives from django.test.TestCase."""
    looks_like_test = node.name.endswith('Test') or isinstance(node.parent, ClassDef)
    if not looks_like_test:
        return False
    return node_is_subclass(node, 'django.test.testcases.TestCase')


def generic_is_view_attribute(parents, attrs):
    """Build a checker accepting the magic view attributes in *attrs* for *parents*."""
    return lambda node: _attribute_is_magic(node, attrs, parents)


def is_model_view_subclass_method_shouldnt_be_function(node):
    """Return True for a get/post method defined on a django View subclass."""
    if node.name not in ('get', 'post'):
        return False
    # Walk up to the enclosing class, if any.
    enclosing = node.parent
    while enclosing and not isinstance(enclosing, ScopedClass):
        enclosing = enclosing.parent
    view_bases = ('django.views.View',
                  'django.views.generic.View',
                  'django.views.generic.base.View',)
    return enclosing is not None and node_is_subclass(enclosing, *view_bases)


def is_model_view_subclass_unused_argument(node):
    """
    Checks that node is get or post method of the View class and it has valid arguments.
    TODO: Bad checkings, need to be more smart.
    """
    return (is_model_view_subclass_method_shouldnt_be_function(node)
            and is_argument_named_request(node))


def is_argument_named_request(node):
    """Ignore the unused-argument warning when the argument is called `request`."""
    return any(arg == 'request' for arg in node.argnames())
def is_model_field_display_method(node):
    """Accept `get_<field>_display` attribute access on Model instances."""
    name = node.attrname
    if not (name.startswith('get_') and name.endswith('_display')):
        return False
    # TODO: could validate the names of the fields on the model rather than
    # blindly accepting get_*_display
    target = node.last_child()
    if not target:
        return False
    try:
        return any(
            node_is_subclass(cls, 'django.db.models.base.Model', '.Model')
            for cls in target.inferred())
    except InferenceError:
        return False


def is_model_media_valid_attributes(node):
    """Suppress naming warnings for known attributes of a nested Media class."""
    if node.name not in ('js', ):
        return False
    owner = node.parent
    while owner and not isinstance(owner, ScopedClass):
        owner = owner.parent
    return owner is not None and owner.name == "Media"


def is_templatetags_module_valid_constant(node):
    """Suppress naming warnings for `register` inside a templatetags module."""
    if node.name not in ('register', ):
        return False
    module = node.parent
    while not isinstance(module, Module):
        module = module.parent
    return "templatetags." in module.name
def allow_meta_protected_access(node):
    """Permit `_meta` access on Django>=1.8, where Model._meta became a public, documented API."""
    if django_version >= (1, 8):
        return node.attrname == '_meta'
    return False


def is_class(class_name):
    """Shortcut for node_is_subclass: build a predicate bound to *class_name*."""
    return lambda node: node_is_subclass(node, class_name)


def wrap(orig_method, with_method):
    """Replace *orig_method* with a function that routes calls through *with_method*.

    All positional/keyword arguments are forwarded, and the wrapper's return
    value is propagated (the previous implementation silently discarded it).
    Note: functools.wraps is deliberately NOT used here — apply_augmentations
    relies on wrap_func's distinct __name__ to detect an already-wrapped
    VariablesChecker.leave_module and avoid double wrapping.
    """
    def wrap_func(*args, **kwargs):
        return with_method(orig_method, *args, **kwargs)
    return wrap_func


def is_wsgi_application(node):
    """Return True for the module-level `application` object of a wsgi module."""
    frame = node.frame()
    return node.name == 'application' and isinstance(frame, Module) and \
        (frame.name == 'wsgi' or frame.path[0].endswith('wsgi.py') or frame.file.endswith('wsgi.py'))


# Compat helpers
def pylint_newstyle_classdef_compat(linter, warning_name, augment):
    """Apply *augment* suppression only on pylint versions exposing visit_classdef."""
    if not hasattr(NewStyleConflictChecker, 'visit_classdef'):
        return
    suppress_message(linter, getattr(NewStyleConflictChecker, 'visit_classdef'), warning_name, augment)
# Wire every predicate above into pylint via augment_visit/suppress_message.
def apply_augmentations(linter):
    """Apply augmentation and suppression rules.

    Registers all the Django-aware predicates defined in this module with
    pylint so that known false positives (magic model/manager attributes,
    Meta/Media inner classes, view handler methods, etc.) are silenced.
    """
    augment_visit(linter, TypeChecker.visit_attribute, foreign_key_sets)
    augment_visit(linter, TypeChecker.visit_attribute, foreign_key_ids)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_field_display_method)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_style_attribute)
    suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_urls_module_valid_constant)
    # suppress errors when accessing magical class attributes
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manager_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_admin_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_field_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_charfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_datefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_decimalfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_filefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_imagefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_ipfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_slugfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_foreignkeyfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manytomanyfield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_onetoonefield_attribute)
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_form_attribute)
    for parents, attrs in VIEW_ATTRS:
        suppress_message(linter, TypeChecker.visit_attribute, 'no-member', generic_is_view_attribute(parents, attrs))
    # formviews have too many ancestors, there's nothing the user of the library can do about that
    suppress_message(linter, MisdesignChecker.visit_classdef, 'too-many-ancestors',
                     is_class('django.views.generic.edit.FormView'))
    # model forms have no __init__ method anywhere in their bases
    suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_class('django.forms.models.ModelForm'))
    # Meta
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_meta_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_meta_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_meta_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_meta_subclass)
    suppress_message(linter, ClassChecker.visit_attribute, 'protected-access', allow_meta_protected_access)
    # Media
    suppress_message(linter, NameChecker.visit_assignname, 'C0103', is_model_media_valid_attributes)
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_media_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_media_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_media_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_media_subclass)
    # Admin
    # Too many public methods (40+/20)
    # TODO: Count public methods of django.contrib.admin.options.ModelAdmin and increase
    # MisdesignChecker.config.max_public_methods to this value to count only the user's methods.
    # nb_public_methods = 0
    # for method in node.methods():
    #     if not method.name.startswith('_'):
    #         nb_public_methods += 1
    suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_admin_subclass)
    # Tests
    suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_test_case_subclass)
    # View
    # Method could be a function (get, post)
    suppress_message(linter, ClassChecker.leave_functiondef, 'no-self-use',
                     is_model_view_subclass_method_shouldnt_be_function)
    # Unused argument 'request' (get, post)
    suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument',
                     is_model_view_subclass_unused_argument)
    suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument', is_argument_named_request)
    # django-mptt
    suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_mpttmeta_subclass)
    pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_mpttmeta_subclass)
    suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_model_mpttmeta_subclass)
    suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_mpttmeta_subclass)
    # factory_boy's DjangoModelFactory
    suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_factory)
    suppress_message(linter, ClassChecker.visit_functiondef, 'no-self-argument', is_factory_post_generation_method)
    # ForeignKey and OneToOneField
    # Must update this in a thread safe way to support the parallel option on pylint (-j)
    current_leave_module = VariablesChecker.leave_module
    # wrap() gives the wrapper a distinct __name__ ('wrap_func'), which is how
    # we detect whether leave_module has already been patched.
    if current_leave_module.__name__ == 'leave_module':
        # current_leave_module is not wrapped
        # Two threads may hit the next assignment concurrently, but the result is the same
        VariablesChecker.leave_module = wrap(current_leave_module, ignore_import_warnings_for_related_fields)
        # VariablesChecker.leave_module is now wrapped
    # else VariablesChecker.leave_module is already wrapped
    # wsgi.py
    suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_wsgi_application)
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
apply_augmentations
|
python
|
def apply_augmentations(linter):
augment_visit(linter, TypeChecker.visit_attribute, foreign_key_sets)
augment_visit(linter, TypeChecker.visit_attribute, foreign_key_ids)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_field_display_method)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_style_attribute)
suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_urls_module_valid_constant)
# supress errors when accessing magical class attributes
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manager_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_admin_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_field_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_charfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_datefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_decimalfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_filefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_imagefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_ipfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_slugfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_foreignkeyfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manytomanyfield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_onetoonefield_attribute)
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_form_attribute)
for parents, attrs in VIEW_ATTRS:
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', generic_is_view_attribute(parents, attrs))
# formviews have too many ancestors, there's nothing the user of the library can do about that
suppress_message(linter, MisdesignChecker.visit_classdef, 'too-many-ancestors',
is_class('django.views.generic.edit.FormView'))
# model forms have no __init__ method anywhere in their bases
suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_class('django.forms.models.ModelForm'))
# Meta
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_meta_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_meta_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_meta_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_meta_subclass)
suppress_message(linter, ClassChecker.visit_attribute, 'protected-access', allow_meta_protected_access)
# Media
suppress_message(linter, NameChecker.visit_assignname, 'C0103', is_model_media_valid_attributes)
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_media_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_media_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_media_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_media_subclass)
# Admin
# Too many public methods (40+/20)
# TODO: Count public methods of django.contrib.admin.options.ModelAdmin and increase
# MisdesignChecker.config.max_public_methods to this value to count only user' methods.
# nb_public_methods = 0
# for method in node.methods():
# if not method.name.startswith('_'):
# nb_public_methods += 1
suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_admin_subclass)
# Tests
suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_test_case_subclass)
# View
# Method could be a function (get, post)
suppress_message(linter, ClassChecker.leave_functiondef, 'no-self-use',
is_model_view_subclass_method_shouldnt_be_function)
# Unused argument 'request' (get, post)
suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument',
is_model_view_subclass_unused_argument)
suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument', is_argument_named_request)
# django-mptt
suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_mpttmeta_subclass)
pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_mpttmeta_subclass)
suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_model_mpttmeta_subclass)
suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_mpttmeta_subclass)
# factory_boy's DjangoModelFactory
suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_factory)
suppress_message(linter, ClassChecker.visit_functiondef, 'no-self-argument', is_factory_post_generation_method)
# ForeignKey and OneToOneField
# Must update this in a thread safe way to support the parallel option on pylint (-j)
current_leave_module = VariablesChecker.leave_module
if current_leave_module.__name__ == 'leave_module':
# current_leave_module is not wrapped
# Two threads may hit the next assignment concurrently, but the result is the same
VariablesChecker.leave_module = wrap(current_leave_module, ignore_import_warnings_for_related_fields)
# VariablesChecker.leave_module is now wrapped
# else VariablesChecker.leave_module is already wrapped
# wsgi.py
suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_wsgi_application)
|
Apply augmentation and suppression rules.
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L759-L851
|
[
"def generic_is_view_attribute(parents, attrs):\n \"\"\"Generates is_X_attribute function for given parents and attrs.\"\"\"\n def is_attribute(node):\n return _attribute_is_magic(node, attrs, parents)\n return is_attribute\n",
"def is_class(class_name):\n \"\"\"Shortcut for node_is_subclass.\"\"\"\n return lambda node: node_is_subclass(node, class_name)\n",
"def wrap(orig_method, with_method):\n def wrap_func(*args, **kwargs):\n with_method(orig_method, *args, **kwargs)\n return wrap_func\n",
"def pylint_newstyle_classdef_compat(linter, warning_name, augment):\n if not hasattr(NewStyleConflictChecker, 'visit_classdef'):\n return\n suppress_message(linter, getattr(NewStyleConflictChecker, 'visit_classdef'), warning_name, augment)\n"
] |
"""Augmentations."""
# pylint: disable=invalid-name
import itertools
from astroid import InferenceError
from astroid.objects import Super
from astroid.nodes import ClassDef, ImportFrom, Attribute
from astroid.scoped_nodes import ClassDef as ScopedClass, Module
from pylint.checkers.base import DocStringChecker, NameChecker
from pylint.checkers.design_analysis import MisdesignChecker
from pylint.checkers.classes import ClassChecker
from pylint.checkers.newstyle import NewStyleConflictChecker
from pylint.checkers.variables import VariablesChecker
from pylint.checkers.typecheck import TypeChecker
from pylint.checkers.variables import ScopeConsumer
from pylint_plugin_utils import augment_visit, suppress_message
from django import VERSION as django_version
from django.views.generic.base import View, RedirectView, ContextMixin
from django.views.generic.dates import DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin
from django.views.generic.detail import SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin
from django.views.generic.edit import DeletionMixin, FormMixin, ModelFormMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
from django.utils import termcolors
from pylint_django.utils import node_is_subclass, PY3
# Note: it would have been nice to import the Manager object from Django and
# get its attributes that way - and this used to be the method - but unfortunately
# there's no guarantee that Django is properly configured at that stage, and importing
# anything from the django.db package causes an ImproperlyConfigured exception.
# Therefore we'll fall back on a hard-coded list of attributes which won't be as accurate,
# but this is not 100% accurate anyway.
MANAGER_ATTRS = {
'none',
'all',
'count',
'dates',
'distinct',
'extra',
'get',
'get_or_create',
'get_queryset',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'latest',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
}
QS_ATTRS = {
'filter',
'exclude',
'annotate',
'order_by',
'reverse',
'distinct',
'values',
'values_list',
'dates',
'datetimes',
'none',
'all',
'select_related',
'prefetch_related',
'extra',
'defer',
'only',
'using',
'select_for_update',
'raw',
'get',
'create',
'get_or_create',
'update_or_create',
'bulk_create',
'count',
'in_bulk',
'iterator',
'latest',
'earliest',
'first',
'last',
'aggregate',
'exists',
'update',
'delete',
'as_manager',
'expression',
'output_field',
}
MODELADMIN_ATTRS = {
# options
'actions',
'actions_on_top',
'actions_on_bottom',
'actions_selection_counter',
'date_hierarchy',
'empty_value_display',
'exclude',
'fields',
'fieldsets',
'filter_horizontal',
'filter_vertical',
'form',
'formfield_overrides',
'inlines',
'list_display',
'list_display_links',
'list_editable',
'list_filter',
'list_max_show_all',
'list_per_page',
'list_select_related',
'ordering',
'paginator',
'prepopulated_fields',
'preserve_filters',
'radio_fields',
'raw_id_fields',
'readonly_fields',
'save_as',
'save_on_top',
'search_fields',
'show_full_result_count',
'view_on_site',
# template options
'add_form_template',
'change_form_template',
'change_list_template',
'delete_confirmation_template',
'delete_selected_confirmation_template',
'object_history_template',
}
MODEL_ATTRS = {
'id',
'DoesNotExist',
'MultipleObjectsReturned',
'_base_manager',
'_default_manager',
'_meta',
'delete',
'get_next_by_date',
'get_previous_by_date',
'objects',
'save',
}
FIELD_ATTRS = {
'null',
'blank',
'choices',
'db_column',
'db_index',
'db_tablespace',
'default',
'editable',
'error_messages',
'help_text',
'primary_key',
'unique',
'unique_for_date',
'unique_for_month',
'unique_for_year',
'verbose_name',
'validators',
}
CHAR_FIELD_ATTRS = {
'max_length',
}
DATE_FIELD_ATTRS = {
'auto_now',
'auto_now_add',
}
DECIMAL_FIELD_ATTRS = {
'max_digits',
'decimal_places',
}
FILE_FIELD_ATTRS = {
'upload_to',
'storage',
}
IMAGE_FIELD_ATTRS = {
'height_field',
'width_field',
}
IP_FIELD_ATTRS = {
'protocol',
'unpack_ipv4',
}
SLUG_FIELD_ATTRS = {
'allow_unicode',
}
FOREIGNKEY_FIELD_ATTRS = {
'limit_choices_to',
'related_name',
'related_query_name',
'to_field',
'db_constraint',
'swappable',
}
MANYTOMANY_FIELD_ATTRS = {
'add',
'clear',
'related_name',
'related_query_name',
'limit_choices_to',
'symmetrical',
'through',
'through_fields',
'db_table',
'db_constraint',
'swappable',
}
ONETOONE_FIELD_ATTRS = {
'parent_link',
}
STYLE_ATTRS = set(itertools.chain.from_iterable(termcolors.PALETTES.values()))
VIEW_ATTRS = {
(
(
'{}.{}'.format(cls.__module__, cls.__name__),
'.{}'.format(cls.__name__)
),
tuple(cls.__dict__.keys())
) for cls in (
View, RedirectView, ContextMixin,
DateMixin, DayMixin, MonthMixin, WeekMixin, YearMixin,
SingleObjectMixin, SingleObjectTemplateResponseMixin, TemplateResponseMixin,
DeletionMixin, FormMixin, ModelFormMixin,
MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)
}
FORM_ATTRS = {
'declared_fields',
}
def ignore_import_warnings_for_related_fields(orig_method, self, node):
    """
    Replaces the leave_module method on the VariablesChecker class to
    prevent unused-import warnings which are caused by the ForeignKey
    and OneToOneField transformations. By replacing the nodes in the
    AST with their type rather than the django field, imports of the
    form 'from django.db.models import OneToOneField' raise an unused-import
    warning
    """
    consumer = self._to_consume[0] # pylint: disable=W0212
    # we can disable this warning ('Access to a protected member _to_consume of a client class')
    # as it's not actually a client class, but rather, this method is being monkey patched
    # onto the class and so the access is valid
    new_things = {}
    # Rebuild the "names still to consume" table, dropping ImportFrom entries
    # that only bring in ForeignKey/OneToOneField — the AST transformation
    # makes those look unused even though they are not.
    iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems
    for name, stmts in iterat():
        if isinstance(stmts[0], ImportFrom):
            if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
                continue
        new_things[name] = stmts
    # Swap the filtered table back into the consumer, then delegate to the
    # original (wrapped) leave_module so normal checking proceeds.
    consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type) # pylint: disable=W0212
    self._to_consume = [consumer] # pylint: disable=W0212
    return orig_method(self, node)
def foreign_key_sets(chain, node):
    """
    When a Django model has a ForeignKey to another model, the target
    of the foreign key gets a '<modelname>_set' attribute for accessing
    a queryset of the model owning the foreign key - eg:
    class ModelA(models.Model):
        pass
    class ModelB(models.Model):
        a = models.ForeignKey(ModelA)
    Now, ModelA instances will have a modelb_set attribute.
    It's also possible to explicitly name the relationship using the related_name argument
    to the ForeignKey constructor. As it's impossible to know this without inspecting all
    models before processing, we'll instead do a "best guess" approach and see if the attribute
    being accessed goes on to be used as a queryset. This is via 'duck typing': if the method
    called on the attribute being accessed is something we might find in a queryset, we'll
    warn.
    """
    quack = False  # True once the access "quacks like" a related manager
    if node.attrname in MANAGER_ATTRS or node.attrname.endswith('_set'):
        # if this is a X_set method, that's a pretty strong signal that this is the default
        # Django name, rather than one set by related_name
        quack = True
    else:
        # related_name case: duck-type it — if the next attribute in the chain
        # is a manager/queryset method, treat this access as a related manager
        if isinstance(node.parent, Attribute):
            func_name = getattr(node.parent, 'attrname', None)
            if func_name in MANAGER_ATTRS:
                quack = True
    if quack:
        children = list(node.get_children())
        for child in children:
            try:
                inferred_cls = child.inferred()
            except InferenceError:
                pass
            else:
                for cls in inferred_cls:
                    if (node_is_subclass(cls,
                                         'django.db.models.manager.Manager',
                                         'django.db.models.base.Model',
                                         '.Model',
                                         'django.db.models.fields.related.ForeignObject')):
                        # This means that we are looking at a subclass of models.Model
                        # and something is trying to access a <something>_set attribute.
                        # Since this could exist, we will return so as not to raise an
                        # error.
                        return
    chain()
def foreign_key_ids(chain, node):
    """Silence no-member for implicit `<fk>_id` attributes; otherwise continue the chain."""
    if not node.attrname.endswith('_id'):
        chain()
def is_model_admin_subclass(node):
    """Return True when *node* derives from ModelAdmin."""
    if not node.name.endswith('Admin') or isinstance(node.parent, ClassDef):
        return False
    return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin')


def is_model_media_subclass(node):
    """Return True for a `Media` class nested inside a known Django class."""
    if node.name != 'Media' or not isinstance(node.parent, ClassDef):
        return False
    allowed_owners = ('django.contrib.admin.options.ModelAdmin',
                      'django.forms.widgets.Media',
                      'django.db.models.base.Model',
                      '.Model',  # for the transformed version used in this plugin
                      'django.forms.forms.Form',
                      '.Form',
                      'django.forms.widgets.Widget',
                      '.Widget',
                      'django.forms.models.ModelForm',
                      '.ModelForm')
    return node_is_subclass(node.parent, *allowed_owners)


def is_model_meta_subclass(node):
    """Return True for a `Meta` class nested inside a known Django/DRF/factory class."""
    if node.name != 'Meta' or not isinstance(node.parent, ClassDef):
        return False
    allowed_owners = ('.Model',  # for the transformed version used here
                      'django.db.models.base.Model',
                      '.Form',
                      'django.forms.forms.Form',
                      '.ModelForm',
                      'django.forms.models.ModelForm',
                      'rest_framework.serializers.BaseSerializer',
                      'rest_framework.generics.GenericAPIView',
                      'rest_framework.viewsets.ReadOnlyModelViewSet',
                      'rest_framework.viewsets.ModelViewSet',
                      'django_filters.filterset.FilterSet',
                      'factory.django.DjangoModelFactory',)
    return node_is_subclass(node.parent, *allowed_owners)
def is_model_factory(node):
    """Return True when *node* infers to a factory_boy factory/declaration."""
    factory_bases = ('factory.declarations.LazyFunction',
                     'factory.declarations.SubFactory',
                     'factory.django.DjangoModelFactory')
    try:
        inferred = node.expr.inferred()
    except:  # noqa: E722, pylint: disable=bare-except
        return False
    for candidate in inferred:
        try:
            if candidate.qname() in factory_bases:
                return True
            if node_is_subclass(candidate, *factory_bases):
                return True
        except AttributeError:
            continue
    return False


def is_factory_post_generation_method(node):
    """Return True for methods decorated with factory.post_generation."""
    if not node.decorators:
        return False
    for decorator in node.decorators.get_children():
        try:
            candidates = decorator.inferred()
        except InferenceError:
            continue
        if any(target.qname() == 'factory.helpers.post_generation'
               for target in candidates):
            return True
    return False
def is_model_mpttmeta_subclass(node):
    """Return True for an `MPTTMeta` class nested inside a known Django class."""
    if node.name != 'MPTTMeta' or not isinstance(node.parent, ClassDef):
        return False
    allowed_owners = ('django.db.models.base.Model',
                      '.Model',  # for the transformed version used in this plugin
                      'django.forms.forms.Form',
                      '.Form',
                      'django.forms.models.ModelForm',
                      '.ModelForm')
    return node_is_subclass(node.parent, *allowed_owners)
def _attribute_is_magic(node, attrs, parents):
    """Checks that node is an attribute used inside one of allowed parents"""
    if node.attrname not in attrs:
        return False
    target = node.last_child()
    if not target:
        return False
    try:
        for inferred in target.inferred():
            if isinstance(inferred, Super):
                # Unwrap super() proxies to the class being proxied.
                inferred = inferred._self_class  # pylint: disable=protected-access
            if node_is_subclass(inferred, *parents) or inferred.qname() in parents:
                return True
    except InferenceError:
        pass
    return False
def is_style_attribute(node):
    """Return True when *node* accesses a termcolors Style attribute."""
    return _attribute_is_magic(node, STYLE_ATTRS, ('django.core.management.color.Style', ))


def is_manager_attribute(node):
    """Return True when *node* is a Manager/QuerySet-specific attribute access."""
    return _attribute_is_magic(
        node, MANAGER_ATTRS.union(QS_ATTRS),
        ('django.db.models.manager.Manager',
         '.Manager',
         'factory.base.BaseFactory.build',
         'django.db.models.query.QuerySet',
         '.QuerySet'))


def is_admin_attribute(node):
    """Return True when *node* is a BaseModelAdmin-specific attribute access."""
    return _attribute_is_magic(
        node, MODELADMIN_ATTRS,
        ('django.contrib.admin.options.BaseModelAdmin', '.BaseModelAdmin'))


def is_model_attribute(node):
    """Return True when *node* is a Model-specific attribute access."""
    return _attribute_is_magic(
        node, MODEL_ATTRS,
        ('django.db.models.base.Model', '.Model'))


def is_field_attribute(node):
    """Return True when *node* is a Field-specific attribute access."""
    return _attribute_is_magic(
        node, FIELD_ATTRS,
        ('django.db.models.fields.Field', '.Field'))


def is_charfield_attribute(node):
    """Return True when *node* is a CharField-specific attribute access."""
    return _attribute_is_magic(
        node, CHAR_FIELD_ATTRS,
        ('django.db.models.fields.CharField', '.CharField'))


def is_datefield_attribute(node):
    """Return True when *node* is a DateField-specific attribute access."""
    return _attribute_is_magic(
        node, DATE_FIELD_ATTRS,
        ('django.db.models.fields.DateField', '.DateField'))


def is_decimalfield_attribute(node):
    """Return True when *node* is a DecimalField-specific attribute access."""
    return _attribute_is_magic(
        node, DECIMAL_FIELD_ATTRS,
        ('django.db.models.fields.DecimalField', '.DecimalField'))


def is_filefield_attribute(node):
    """Return True when *node* is a FileField-specific attribute access."""
    return _attribute_is_magic(
        node, FILE_FIELD_ATTRS,
        ('django.db.models.fields.files.FileField', '.FileField'))


def is_imagefield_attribute(node):
    """Return True when *node* is an ImageField-specific attribute access."""
    return _attribute_is_magic(
        node, IMAGE_FIELD_ATTRS,
        ('django.db.models.fields.files.ImageField', '.ImageField'))


def is_ipfield_attribute(node):
    """Return True when *node* is a GenericIPAddressField-specific attribute access."""
    return _attribute_is_magic(
        node, IP_FIELD_ATTRS,
        ('django.db.models.fields.GenericIPAddressField', '.GenericIPAddressField'))


def is_slugfield_attribute(node):
    """Return True when *node* is a SlugField-specific attribute access."""
    return _attribute_is_magic(
        node, SLUG_FIELD_ATTRS,
        ('django.db.models.fields.SlugField', '.SlugField'))


def is_foreignkeyfield_attribute(node):
    """Return True when *node* is a ForeignKey-specific attribute access."""
    return _attribute_is_magic(
        node, FOREIGNKEY_FIELD_ATTRS,
        ('django.db.models.fields.related.ForeignKey', '.ForeignKey'))


def is_manytomanyfield_attribute(node):
    """Return True when *node* is a ManyToManyField-specific attribute access."""
    return _attribute_is_magic(
        node, MANYTOMANY_FIELD_ATTRS,
        ('django.db.models.fields.related.ManyToManyField', '.ManyToManyField'))


def is_onetoonefield_attribute(node):
    """Return True when *node* is a OneToOneField-specific attribute access."""
    return _attribute_is_magic(
        node, ONETOONE_FIELD_ATTRS,
        ('django.db.models.fields.related.OneToOneField', '.OneToOneField'))


def is_form_attribute(node):
    """Return True when *node* is a Form-specific attribute access."""
    return _attribute_is_magic(
        node, FORM_ATTRS,
        ('django.forms.forms.Form', 'django.forms.models.ModelForm'))
def is_model_test_case_subclass(node):
"""Checks that node is derivative of TestCase class."""
if not node.name.endswith('Test') and not isinstance(node.parent, ClassDef):
return False
return node_is_subclass(node, 'django.test.testcases.TestCase')
def generic_is_view_attribute(parents, attrs):
"""Generates is_X_attribute function for given parents and attrs."""
def is_attribute(node):
return _attribute_is_magic(node, attrs, parents)
return is_attribute
def is_model_view_subclass_method_shouldnt_be_function(node):
"""Checks that node is get or post method of the View class."""
if node.name not in ('get', 'post'):
return False
parent = node.parent
while parent and not isinstance(parent, ScopedClass):
parent = parent.parent
subclass = ('django.views.View',
'django.views.generic.View',
'django.views.generic.base.View',)
return parent is not None and node_is_subclass(parent, *subclass)
def is_model_view_subclass_unused_argument(node):
"""
Checks that node is get or post method of the View class and it has valid arguments.
TODO: Bad checkings, need to be more smart.
"""
if not is_model_view_subclass_method_shouldnt_be_function(node):
return False
return is_argument_named_request(node)
def is_argument_named_request(node):
"""
If an unused-argument is named 'request' ignore that!
"""
return 'request' in node.argnames()
def is_model_field_display_method(node):
    """Accept model's fields with get_*_display names."""
    attr = node.attrname
    if not (attr.startswith('get_') and attr.endswith('_display')):
        return False
    child = node.last_child()
    if child:
        # TODO: could validate the names of the fields on the model rather
        # than blindly accepting get_*_display
        try:
            for cls in child.inferred():
                if node_is_subclass(cls, 'django.db.models.base.Model', '.Model'):
                    return True
        except InferenceError:
            return False
    return False
def is_model_media_valid_attributes(node):
    """Suppress warnings for valid attributes of Media class."""
    if node.name not in ('js', ):
        return False
    # Find the class the attribute lives in; it must be named "Media".
    enclosing = node.parent
    while enclosing and not isinstance(enclosing, ScopedClass):
        enclosing = enclosing.parent
    return enclosing is not None and enclosing.name == "Media"
def is_templatetags_module_valid_constant(node):
    """Suppress warnings for valid constants in templatetags module."""
    if node.name not in ('register', ):
        return False
    # Climb to the enclosing module and check it is a templatetags module.
    module = node.parent
    while not isinstance(module, Module):
        module = module.parent
    return "templatetags." in module.name
def is_urls_module_valid_constant(node):
    """Suppress warnings for valid constants in urls module."""
    if node.name not in ('urlpatterns', 'app_name'):
        return False
    # Climb to the enclosing module and check it is a urls module.
    module = node.parent
    while not isinstance(module, Module):
        module = module.parent
    return module.name.endswith('urls')
def allow_meta_protected_access(node):
    """Allow protected access to ``._meta`` on Django >= 1.8."""
    # NOTE(review): presumably because Model._meta became supported API in
    # Django 1.8 -- confirm against the Django release notes.
    return django_version >= (1, 8) and node.attrname == '_meta'
def is_class(class_name):
    """Shortcut for node_is_subclass."""
    def check(node):
        return node_is_subclass(node, class_name)
    return check
def wrap(orig_method, with_method):
    """Return a callable that invokes *with_method* with *orig_method* prepended.

    Deliberately does NOT use functools.wraps: callers inspect ``__name__``
    to detect whether a method has already been wrapped.
    """
    def wrapped(*args, **kwargs):
        with_method(orig_method, *args, **kwargs)
    return wrapped
def is_wsgi_application(node):
    """Detect the conventional ``application`` object of a ``wsgi`` module."""
    frame = node.frame()
    if node.name != 'application':
        return False
    return isinstance(frame, Module) and (
        frame.name == 'wsgi'
        or frame.path[0].endswith('wsgi.py')
        or frame.file.endswith('wsgi.py'))
# Compat helpers
def pylint_newstyle_classdef_compat(linter, warning_name, augment):
    """Suppress *warning_name* on visit_classdef only when pylint provides it."""
    visit = getattr(NewStyleConflictChecker, 'visit_classdef', None)
    if visit is None:
        # older pylint without visit_classdef -- nothing to suppress
        return
    suppress_message(linter, visit, warning_name, augment)
# augment things
|
PyCQA/pylint-django
|
pylint_django/checkers/models.py
|
ModelChecker.visit_classdef
|
python
|
def visit_classdef(self, node):
    """Class visitor.

    Inspects a Django model class and emits the unicode-related messages:
    E%s01 when ``__unicode__`` is assigned a non-callable, W%s02 when a
    ``__unicode__`` method exists under Python 3, W%s03 when only a parent
    class provides the method, W%s01 (Python 2 only) when no ``__unicode__``
    is defined at all.
    """
    if not node_is_subclass(node, 'django.db.models.base.Model', '.Model'):
        # we only care about models
        return
    for child in node.get_children():
        # abstract models are exempt from the __unicode__ checks
        if _is_meta_with_abstract(child):
            return
        if isinstance(child, Assign):
            grandchildren = list(child.get_children())
            if not isinstance(grandchildren[0], AssignName):
                continue
            name = grandchildren[0].name
            if name != '__unicode__':
                continue
            # __unicode__ assigned at class level: accept it only if the
            # inferred value is callable
            grandchild = grandchildren[1]
            assigned = grandchild.inferred()[0]
            if assigned.callable():
                return
            self.add_message('E%s01' % BASE_ID, args=node.name, node=node)
            return
        if isinstance(child, FunctionDef) and child.name == '__unicode__':
            if PY3:
                # __unicode__ is never called by Python 3
                self.add_message('W%s02' % BASE_ID, args=node.name, node=node)
            return
    # if we get here, then we have no __unicode__ method directly on the class itself
    # a different warning is emitted if a parent declares __unicode__
    for method in node.methods():
        if method.parent != node and _is_unicode_or_str_in_python_2_compatibility(method):
            # this happens if a parent declares the unicode method but
            # this node does not
            self.add_message('W%s03' % BASE_ID, args=node.name, node=node)
            return
    # if the Django compatibility decorator is used then we don't emit a warning
    # see https://github.com/PyCQA/pylint-django/issues/10
    if _has_python_2_unicode_compatible_decorator(node):
        return
    if PY3:
        return
    self.add_message('W%s01' % BASE_ID, args=node.name, node=node)
|
Class visitor.
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/checkers/models.py#L79-L131
|
[
"def node_is_subclass(cls, *subclass_names):\n \"\"\"Checks if cls node has parent with subclass_name.\"\"\"\n if not isinstance(cls, (ClassDef, Instance)):\n return False\n\n if cls.bases == Uninferable:\n return False\n for base_cls in cls.bases:\n try:\n for inf in base_cls.inferred():\n if inf.qname() in subclass_names:\n return True\n if inf != cls and node_is_subclass(inf, *subclass_names):\n # check up the hierarchy in case we are a subclass of\n # a subclass of a subclass ...\n return True\n except InferenceError:\n continue\n\n return False\n",
"def _is_meta_with_abstract(node):\n if isinstance(node, ClassDef) and node.name == 'Meta':\n for meta_child in node.get_children():\n if not isinstance(meta_child, Assign):\n continue\n if not meta_child.targets[0].name == 'abstract':\n continue\n if not isinstance(meta_child.value, Const):\n continue\n # TODO: handle tuple assignment?\n # eg:\n # abstract, something_else = True, 1\n if meta_child.value.value:\n # this class is abstract\n return True\n return False\n",
"def _has_python_2_unicode_compatible_decorator(node):\n if node.decorators is None:\n return False\n\n for decorator in node.decorators.nodes:\n if getattr(decorator, 'name', None) == 'python_2_unicode_compatible':\n return True\n\n return False\n"
] |
class ModelChecker(BaseChecker):
"""Django model checker."""
__implements__ = IAstroidChecker
name = 'django-model-checker'
msgs = MESSAGES
@check_messages('model-missing-unicode')
|
PyCQA/pylint-django
|
pylint_django/plugin.py
|
load_configuration
|
python
|
def load_configuration(linter):
    """Amend existing checker config with Django-friendly defaults."""
    extra_good_names = ('qs', 'urlpatterns', 'register', 'app_name', 'handler500')
    name_checker = get_checker(linter, NameChecker)
    name_checker.config.good_names += extra_good_names
    # migration modules are generated code -- don't lint them
    linter.config.black_list += ('migrations', 'south_migrations')
|
Amend existing checker config.
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/plugin.py#L13-L21
| null |
"""Common Django module."""
from pylint.checkers.base import NameChecker
from pylint_plugin_utils import get_checker
from pylint_django.checkers import register_checkers
# we want to import the transforms to make sure they get added to the astroid manager,
# however we don't actually access them directly, so we'll disable the warning
from pylint_django import transforms # noqa, pylint: disable=unused-import
from pylint_django import compat
def register(linter):
    """
    Registering additional checkers.
    """
    # add all of the checkers
    register_checkers(linter)
    # register any checking fiddlers
    try:
        from pylint_django.augmentations import apply_augmentations
        apply_augmentations(linter)
    except ImportError:
        # probably trying to execute pylint_django when Django isn't installed
        # in this case the django-not-installed checker will kick-in
        pass
    # On older pylint the load_configuration hook is never invoked by pylint
    # itself, so apply the config tweaks eagerly here.
    if not compat.LOAD_CONFIGURATION_SUPPORTED:
        load_configuration(linter)
|
PyCQA/pylint-django
|
pylint_django/plugin.py
|
register
|
python
|
def register(linter):
# add all of the checkers
register_checkers(linter)
# register any checking fiddlers
try:
from pylint_django.augmentations import apply_augmentations
apply_augmentations(linter)
except ImportError:
# probably trying to execute pylint_django when Django isn't installed
# in this case the django-not-installed checker will kick-in
pass
if not compat.LOAD_CONFIGURATION_SUPPORTED:
load_configuration(linter)
|
Registering additional checkers.
|
train
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/plugin.py#L24-L41
|
[
"def apply_augmentations(linter):\n \"\"\"Apply augmentation and suppression rules.\"\"\"\n augment_visit(linter, TypeChecker.visit_attribute, foreign_key_sets)\n augment_visit(linter, TypeChecker.visit_attribute, foreign_key_ids)\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_field_display_method)\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_style_attribute)\n suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_urls_module_valid_constant)\n\n # supress errors when accessing magical class attributes\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manager_attribute)\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_admin_attribute)\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_attribute)\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_field_attribute)\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_charfield_attribute)\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_datefield_attribute)\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_decimalfield_attribute)\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_filefield_attribute)\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_imagefield_attribute)\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_ipfield_attribute)\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_slugfield_attribute)\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_foreignkeyfield_attribute)\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manytomanyfield_attribute)\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_onetoonefield_attribute)\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_form_attribute)\n\n for parents, 
attrs in VIEW_ATTRS:\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', generic_is_view_attribute(parents, attrs))\n\n # formviews have too many ancestors, there's nothing the user of the library can do about that\n suppress_message(linter, MisdesignChecker.visit_classdef, 'too-many-ancestors',\n is_class('django.views.generic.edit.FormView'))\n\n # model forms have no __init__ method anywhere in their bases\n suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_class('django.forms.models.ModelForm'))\n\n # Meta\n suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_meta_subclass)\n pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_meta_subclass)\n suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_meta_subclass)\n suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_meta_subclass)\n suppress_message(linter, ClassChecker.visit_attribute, 'protected-access', allow_meta_protected_access)\n\n # Media\n suppress_message(linter, NameChecker.visit_assignname, 'C0103', is_model_media_valid_attributes)\n suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_media_subclass)\n pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_media_subclass)\n suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_media_subclass)\n suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_media_subclass)\n\n # Admin\n # Too many public methods (40+/20)\n # TODO: Count public methods of django.contrib.admin.options.ModelAdmin and increase\n # MisdesignChecker.config.max_public_methods to this value to count only user' methods.\n # nb_public_methods = 0\n # for method in node.methods():\n # if not method.name.startswith('_'):\n # nb_public_methods += 1\n suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', 
is_model_admin_subclass)\n\n # Tests\n suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_test_case_subclass)\n\n # View\n # Method could be a function (get, post)\n suppress_message(linter, ClassChecker.leave_functiondef, 'no-self-use',\n is_model_view_subclass_method_shouldnt_be_function)\n # Unused argument 'request' (get, post)\n suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument',\n is_model_view_subclass_unused_argument)\n suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument', is_argument_named_request)\n\n # django-mptt\n suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_mpttmeta_subclass)\n pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_mpttmeta_subclass)\n suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_model_mpttmeta_subclass)\n suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_mpttmeta_subclass)\n\n # factory_boy's DjangoModelFactory\n suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_factory)\n suppress_message(linter, ClassChecker.visit_functiondef, 'no-self-argument', is_factory_post_generation_method)\n\n # ForeignKey and OneToOneField\n # Must update this in a thread safe way to support the parallel option on pylint (-j)\n current_leave_module = VariablesChecker.leave_module\n if current_leave_module.__name__ == 'leave_module':\n # current_leave_module is not wrapped\n # Two threads may hit the next assignment concurrently, but the result is the same\n VariablesChecker.leave_module = wrap(current_leave_module, ignore_import_warnings_for_related_fields)\n # VariablesChecker.leave_module is now wrapped\n # else VariablesChecker.leave_module is already wrapped\n\n # wsgi.py\n suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_wsgi_application)\n",
"def register_checkers(linter):\n \"\"\"Register checkers.\"\"\"\n linter.register_checker(ModelChecker(linter))\n linter.register_checker(DjangoInstalledChecker(linter))\n linter.register_checker(JsonResponseChecker(linter))\n linter.register_checker(FormChecker(linter))\n",
"def load_configuration(linter):\n \"\"\"\n Amend existing checker config.\n \"\"\"\n name_checker = get_checker(linter, NameChecker)\n name_checker.config.good_names += ('qs', 'urlpatterns', 'register', 'app_name', 'handler500')\n\n # we don't care about South migrations\n linter.config.black_list += ('migrations', 'south_migrations')\n"
] |
"""Common Django module."""
from pylint.checkers.base import NameChecker
from pylint_plugin_utils import get_checker
from pylint_django.checkers import register_checkers
# we want to import the transforms to make sure they get added to the astroid manager,
# however we don't actually access them directly, so we'll disable the warning
from pylint_django import transforms # noqa, pylint: disable=unused-import
from pylint_django import compat
def load_configuration(linter):
"""
Amend existing checker config.
"""
name_checker = get_checker(linter, NameChecker)
name_checker.config.good_names += ('qs', 'urlpatterns', 'register', 'app_name', 'handler500')
# we don't care about South migrations
linter.config.black_list += ('migrations', 'south_migrations')
|
jpype-project/jpype
|
jpype/_darwin.py
|
DarwinJVMFinder._javahome_binary
|
python
|
def _javahome_binary(self):
    """Locate a JVM via the macOS ``/usr/libexec/java_home`` utility.

    Only consulted for 10.6 <= macOS < 10.9 (the tool was removed later);
    returns the tool's output, or None outside that version window.
    """
    import platform
    import subprocess
    # NOTE(review): distutils is deprecated (PEP 632); packaging.version
    # would be the modern replacement -- confirm before changing.
    from distutils.version import StrictVersion
    current = StrictVersion(platform.mac_ver()[0][:4])
    if current >= StrictVersion('10.6') and current < StrictVersion('10.9'):
        # check_output only exists on Python >= 2.7; fall back to Popen
        if hasattr(subprocess, 'check_output'):
            java_home = subprocess.check_output(['/usr/libexec/java_home']).strip()
        else:
            java_home = subprocess.Popen(['/usr/libexec/java_home'], stdout=subprocess.PIPE).communicate()[0]
        return java_home
|
for osx > 10.5 we have the nice util /usr/libexec/java_home available. Invoke it and
return its output. It seems this tool has been removed in osx 10.9.
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_darwin.py#L52-L67
| null |
class DarwinJVMFinder(LinuxJVMFinder):
    """
    Mac OS X JVM library finder class
    """
    def __init__(self):
        """
        Sets up members
        """
        # Parent configures the common POSIX search machinery.
        LinuxJVMFinder.__init__(self)
        # Library file name
        self._libfile = "libjli.dylib"
        # Extend the inherited lookup strategies with macOS fallbacks.
        self._methods = list(self._methods) + [self._pre_vm7_path,
                                               self._javahome_binary]
        # Predefined locations
        self._locations = ('/Library/Java/JavaVirtualMachines',)
    def _pre_vm7_path(self):
        """
        Returns the previous constant JVM library path:
        '/System/Library/Frameworks/JavaVM.framework/JavaVM'
        """
        return '/System/Library/Frameworks/JavaVM.framework/JavaVM'
|
jpype-project/jpype
|
setupext/build_thunk.py
|
BuildThunkCommand.run
|
python
|
def run(self):
    """Run command: regenerate thunk sources when any .class file is newer."""
    self.announce(
        'Building thunks',
        level=distutils.log.INFO)
    # run short circuit logic here
    srcDir = os.path.join("build", "lib")
    destBody = os.path.join("build", "src", "jp_thunk.cpp")
    destHeader = os.path.join("build", "src", "jp_thunk.h")
    if os.path.isfile(destBody):
        generated_at = os.path.getctime(destBody)
        # NOTE(review): getctime is metadata-change time on Unix; getmtime
        # may be the intended freshness check -- confirm before changing.
        stale = any(generated_at < os.path.getctime(classfile)
                    for classfile in _glob(srcDir, "*.class"))
        if not stale:
            self.announce(
                'Skip build thunks',
                level=distutils.log.INFO)
            return
    # do the build
    createThunks(
        srcDir,
        destBody,
        destHeader,
        namespace="JPThunk")
|
Run command.
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/setupext/build_thunk.py#L118-L145
|
[
"def _glob(directory,pattern):\n out = []\n for root, dirnames, filenames in os.walk(directory):\n for filename in fnmatch.filter(filenames, pattern):\n out.append(os.path.join(root,filename))\n return out\n",
"def createThunks(input_dir, output_src, output_header, namespace=\"Thunk\"):\n mkFileDir(output_src)\n mkFileDir(output_header)\n\n # Write the header\n with open(output_header,\"w+\") as fheader:\n sz=len(input_dir)\n guard=translate(output_header.upper(),'/\\\\.','___')\n print(\"#ifndef %s\"%guard, file=fheader)\n print(\"#define %s\"%guard, file=fheader)\n print(\"#include <jpype.h>\", file=fheader)\n print(\"namespace %s {\"%namespace, file=fheader)\n for filename in _glob(input_dir, \"*.class\"):\n name=translate(filename,'/\\\\.','___')[sz:-6]\n print(\"extern jbyte %s[];\"%name, file=fheader)\n print(\"extern int %s_size;\"%name, file=fheader)\n for filename in _glob(input_dir, \"*.jar\"):\n name=translate(filename,'/\\\\.','___')[sz:-4]\n print(\"extern jbyte %s[];\"%name, file=fheader)\n print(\"extern int %s_size;\"%name, file=fheader)\n print(\"}\", file=fheader)\n print(\"#endif\", file=fheader)\n\n # Write the body\n with open(output_src,\"w+\") as fimpl:\n sz=len(input_dir)\n print(\"#include <jp_thunk.h>\", file=fimpl)\n print(\"namespace %s {\"%namespace, file=fimpl)\n for filename in _glob(input_dir, \"*.class\"):\n print(\" including thunk %s\"%filename)\n name=translate(filename,'/\\\\.','___')[sz:-6]\n outputClass(filename, name, fimpl)\n for filename in _glob(input_dir, \"*.jar\"):\n print(\" including thunk %s\"%filename)\n name=translate(filename,'/\\\\.','___')[sz:-4]\n outputClass(filename, name, fimpl)\n print(\"}\", file=fimpl)\n"
] |
class BuildThunkCommand(distutils.cmd.Command):
    """A custom command to create thunk file."""
    description = 'build dynamic code thunks'
    # the command accepts no command-line options
    user_options = [
    ]
    def initialize_options(self):
        """Set default values for options."""
        # nothing to initialize -- no options are defined
        pass
    def finalize_options(self):
        """Post-process options."""
        # nothing to validate -- no options are defined
        pass
|
jpype-project/jpype
|
jpype/_windows.py
|
WindowsJVMFinder._get_from_registry
|
python
|
def _get_from_registry(self):
    """
    Retrieves the path to the default Java installation stored in the
    Windows registry

    :return: The path found in the registry, or None
    """
    # Winreg is an optional package in cygwin
    winreg = self._get_winreg()
    if not winreg:
        return None
    for location in reg_keys:
        try:
            jreKey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, location)
            cv = winreg.QueryValueEx(jreKey, "CurrentVersion")
            versionKey = winreg.OpenKey(jreKey, cv[0])
            winreg.CloseKey(jreKey)
            # "RuntimeLib" holds the jvm.dll path for the current version
            cv = winreg.QueryValueEx(versionKey, "RuntimeLib")
            winreg.CloseKey(versionKey)
            return cv[0]
        except WindowsError:
            # NOTE(review): keys opened before the failure are not closed on
            # this path -- a try/finally would be tidier; confirm intent.
            pass
|
Retrieves the path to the default Java installation stored in the
Windows registry
:return: The path found in the registry, or None
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_windows.py#L99-L122
| null |
class WindowsJVMFinder(_jvmfinder.JVMFinder):
    """
    Windows JVM library finder class
    """
    def __init__(self):
        """
        Sets up members
        """
        _jvmfinder.JVMFinder.__init__(self)
        # Library file name
        self._libfile = "jvm.dll"
        # The registry lookup only works when a winreg module is importable
        # (it is an optional package under cygwin).
        if self._get_winreg() is None:
            self._methods = (self._get_from_java_home, )
        else:
            self._methods = (self._get_from_java_home, self._get_from_registry)
    def check(self, jvm):
        """Validate the JVM binary architecture."""
        _checkJVMArch(jvm)
    def _get_winreg(self):
        """Return the winreg module, or None when unavailable."""
        # Python 2 name
        try:
            import _winreg as winreg
            return winreg
        except ImportError:
            pass
        # Python 3 name
        try:
            import winreg
            return winreg
        except ImportError:
            pass
        return None
|
jpype-project/jpype
|
jpype/_linux.py
|
LinuxJVMFinder._get_from_bin
|
python
|
def _get_from_bin(self):
# Find the real interpreter installation path
java_bin = os.path.realpath(self._java)
if os.path.exists(java_bin):
# Get to the home directory
java_home = os.path.abspath(os.path.join(os.path.dirname(java_bin),
'..'))
# Look for the JVM library
return self.find_libjvm(java_home)
|
Retrieves the Java library path according to the real installation of
the java executable
:return: The path to the JVM library, or None
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_linux.py#L49-L64
|
[
"def find_libjvm(self, java_home):\n \"\"\"\n Recursively looks for the given file\n\n :param java_home: A Java home folder\n :param filename: Name of the file to find\n :return: The first found file path, or None\n \"\"\"\n found_jamvm = False\n non_supported_jvm = ('cacao', 'jamvm')\n found_non_supported_jvm = False\n\n # Look for the file\n for root, _, names in os.walk(java_home):\n if self._libfile in names:\n # Found it, but check for non supported jvms\n candidate = os.path.split(root)[1]\n if candidate in non_supported_jvm:\n found_non_supported_jvm = True\n continue # maybe we will find another one?\n return os.path.join(root, self._libfile)\n"
] |
class LinuxJVMFinder(_jvmfinder.JVMFinder):
    """
    Linux JVM library finder class
    """
    def __init__(self):
        """
        Sets up members
        """
        _jvmfinder.JVMFinder.__init__(self)
        # Interpreter used to locate the installation
        self._java = "/usr/bin/java"
        # Name of the JVM shared library we are looking for
        self._libfile = "libjvm.so"
        # Well-known installation roots, probed as a last resort
        self._locations = ("/usr/lib/jvm", "/usr/java", "/opt/sun")
        # Lookup strategies, tried in order
        self._methods = (self._get_from_java_home,
                         self._get_from_bin,
                         self._get_from_known_locations)
|
jpype-project/jpype
|
jpype/imports.py
|
_JImportFactory
|
python
|
def _JImportFactory(spec, javaname, cls=_JImport):
    """ (internal) Factory for creating java modules dynamically.
    This is needed to create a new type node to hold static methods.
    """
    def init(self, name):
        # Call the base class
        cls.__init__(self, name)
    def getall(self):
        # Compute __all__ lazily: every public attribute (instance or
        # class level) whose type is one of the exportable kinds.
        global _exportTypes
        d1 = self.__dict__.items()
        d2 = self.__class__.__dict__.items()
        local = [name for name, attr in d1 if not name.startswith('_')
                 and isinstance(attr, _exportTypes)]
        glob = [name for name, attr in d2 if not name.startswith('_')
                and isinstance(attr, _exportTypes)]
        local.extend(glob)
        return local
    # Set up a new class for this type
    bases = [cls]
    members = {
        "__init__": init,
        "__javaname__": javaname,
        "__name__": spec.name,
        "__all__": property(getall),
        "__spec__": spec,
    }
    # Is this module also a class, if so insert class info
    jclass = _getJavaClass(javaname)
    if jclass:
        # Mark this as a class (will cause children to be inner classes)
        members['__javaclass__'] = jclass
        # Exposed static members as part of the module
        _copyProperties(members, jclass.__metaclass__)
        _copyStaticMethods(members, jclass)
    return type("module." + spec.name, tuple(bases), members)
|
(internal) Factory for creating java modules dynamically.
This is needed to create a new type node to hold static methods.
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/imports.py#L201-L241
| null |
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright 2017 Karl Einar Nelson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *****************************************************************************
# Optional jpype module to support:
# import <java_pkg> [ as <name> ]
# import <java_pkg>.<java_class> [ as <name> ]
# from <java_pkg> import <java_class>[,<java_class>*]
# from <java_pkg> import <java_class> [ as <name> ]
# from <java_pkg>.<java_class> import <java_static> [ as <name> ]
# from <java_pkg>.<java_class> import <java_inner> [ as <name> ]
#
# jpype.imports.registerDomain(moduleName, alias=<java_pkg>)
# jpype.imports.registerImportCustomizer(JImportCustomizer)
#
# Requires Python 3.6 or later
# Usage:
# import jpype
# import jpype.imports
# <start or attach jvm>
# # Import java packages as modules
# from java.lang import String
try:
from importlib.machinery import ModuleSpec as _ModuleSpec
from types import ModuleType as _ModuleType
except Exception:
# For Python2 compatiblity
# (Note: customizers are not supported)
class _ModuleSpec(object):
def __init__(self, name, loader):
self.name = name
self.loader = loader
_ModuleType = object
import _jpype
import sys as _sys
import keyword as _keyword
from ._jclass import JClass as _JClass
from ._jclass import _JavaClass as _JavaClass
from ._core import registerJVMInitializer as _jinit
__all__ = ["registerImportCustomizer", "registerDomain", "JImportCustomizer"]
_exportTypes = ()
_modifier = None
# %% Utility
def _keywordUnwrap(name):
if not name.endswith('_'):
return name
if _keyword.iskeyword(name[:-1]):
return name[:-1]
return name
def _keywordWrap(name):
if name in _keyword.kwlist:
return name + "_"
return name
def _getJavaClass(javaname):
    """Resolve *javaname* to a java class, or None when it does not exist."""
    try:
        return _JClass(javaname)
    except Exception:
        # deliberate best-effort: a missing class is an expected outcome
        return None
def _copyProperties(out, mc):
for v in dir(mc):
# Skip private members
if v.startswith('_'):
continue
# Copy properties
attr = getattr(mc, v)
if isinstance(attr, property):
out[v] = attr
def _getStaticMethods(cls):
    """Collect the static methods of java class *cls*, keyed by safe name."""
    global _modifier
    static = {}
    for method in cls.class_.getMethods():
        if _modifier.isStatic(method.getModifiers()):
            # Wrap names that collide with Python keywords
            safe_name = _keywordWrap(method.getName())
            static[safe_name] = getattr(cls, safe_name)
    return static
def _copyStaticMethods(out, cls):
    """Merge the static methods of *cls* into the dict *out*."""
    out.update(_getStaticMethods(cls))
# %% Customizer
# Registered import customizers; consulted by the meta-path finder before
# falling back to the default java-module handling (Python 3 only).
_CUSTOMIZERS = []
if _sys.version_info > (3,):
    def registerImportCustomizer(customizer):
        """ Import customizers can be used to import python packages
        into java modules automatically.
        """
        _CUSTOMIZERS.append(customizer)
    # Support hook for placing other things into the java tree
    class JImportCustomizer(object):
        """ Base class for Import customizer.

        Import customizers should implement canCustomize and getSpec.

        Example:

        | # Site packages for each java package are stored under $DEVEL/<java_pkg>/py
        | class SiteCustomizer(jpype.imports.JImportCustomizer):
        |     def canCustomize(self, name):
        |         if name.startswith('org.mysite') and name.endswith('.py'):
        |             return True
        |         return False
        |     def getSpec(self, name):
        |         pname = name[:-3]
        |         devel = os.environ.get('DEVEL')
        |         path = os.path.join(devel, pname, 'py', '__init__.py')
        |         return importlib.util.spec_from_file_location(name, path)
        """
        def canCustomize(self, name):
            """ Determine if this path is to be treated differently

            Return:
                True if an alternative spec is required.
            """
            return False
        def getSpec(self, name):
            """ Get the module spec for this module. """
            raise NotImplementedError
else:
    def registerImportCustomizer(customizer):
        # Customizers rely on importlib machinery that only exists on Python 3.
        raise NotImplementedError("Import customizers not implemented for Python 2.x")
    JImportCustomizer = object
# %% Import
class _JImport(object):
    """ (internal) Base class for import java modules.

    Instances masquerade as python modules; attribute access lazily resolves
    and caches java classes.
    """
    # Module protocol requirements
    __doc__ = None
    __loader__ = None
    __path__ = []
    __package__ = "java"
    def __init__(self, name):
        pass
    def __getattr__(self, name):
        # Only invoked for names not already cached on the instance.
        if name.startswith('_'):
            return object.__getattribute__(self, name)
        name = _keywordUnwrap(name)
        # Inner class support
        jname = object.__getattribute__(self, '__javaname__')
        try:
            # If this node is itself a java class, the child is an inner
            # class and java mangles its name with '$'.
            object.__getattribute__(self, '__javaclass__')
            jname = "$".join([jname, name])
        except AttributeError:
            # Otherwise this node is a package: use dotted notation.
            jname = ".".join([jname, name])
        # Get the class (if it exists)
        jtype = _getJavaClass(jname)
        if jtype:
            # Cache it for later
            object.__setattr__(self, name, jtype)
            return jtype
        # If the java class does not exist, throw a ClassNotFound exception
        raise Exception("Unable to find java class " + jname)
    def __setattr__(self, name, value):
        # Dunder names are reserved for the module machinery.
        if name.startswith('__'):
            raise AttributeError("Module does not allow setting of %s" % name)
        # Java classes are stored by their underlying class object.
        if hasattr(value, '__javaclass__'):
            return object.__setattr__(self, name, getattr(value, '__javaclass__'))
        # Only java modules and python modules may be attached.
        if isinstance(value, (_JImport, _ModuleType)):
            return object.__setattr__(self, name, value)
        raise AttributeError("JImport may not set attribute %s" % name)
# In order to get properties to be attached to the _JImport class,
# we must create a dynamic class between
def _JModule(spec, javaname):
    """ (internal) Front end for creating a java module dynamically """
    module_cls = _JImportFactory(spec, javaname)
    return module_cls(spec.name)
# %% Finder
class _JImportLoader:
    """ (internal) Finder/loader hook for importlib.

    Registered on sys.meta_path; turns imports under a registered java
    domain (java, com, org, gov, ...) into dynamically created java modules.
    """
    def find_spec(self, name, path, target):
        """Return a ModuleSpec when *name*'s top-level package is a java domain."""
        parts = name.split('.', 1)
        if not parts[0] in _JDOMAINS:
            return None
        # Support for external modules in java tree
        for customizer in _CUSTOMIZERS:
            if customizer.canCustomize(name):
                return customizer.getSpec(name)
        # Import the java module
        return _ModuleSpec(name, self)
    def create_module(self, spec):
        """Build the dynamic java module for *spec* (requires a running JVM)."""
        if not _jpype.isStarted():
            raise ImportError("Attempt to create java modules without jvm")
        # Handle creating the java name based on the path
        parts = spec.name.split('.')
        if len(parts) == 1:
            return _JModule(spec, _JDOMAINS[spec.name])
        # Use the parent module to simplify name mangling
        base = _sys.modules[".".join(parts[:-1])]
        # Support of inner classes
        if not isinstance(base, _JImport):
            return getattr(base, parts[-1])
        jbasename = object.__getattribute__(base, '__javaname__')
        try:
            # BUG FIX: this previously read ``object.__getattribute`` (missing
            # the trailing underscores), which always raised AttributeError and
            # silently took the package (".") branch even for inner classes.
            object.__getattribute__(base, '__javaclass__')
            javaname = "$".join([jbasename, _keywordUnwrap(parts[-1])])
        except AttributeError:
            javaname = ".".join([jbasename, _keywordUnwrap(parts[-1])])
        return _JModule(spec, javaname)
    def exec_module(self, fullname):
        # Nothing to execute -- the module is fully built in create_module.
        pass
    # For compatibility with Python 2.7
    def find_module(self, name, path=None):
        parts = name.split('.', 1)
        if not parts[0] in _JDOMAINS:
            return None
        return self
    # For compatibility with Python 2.7
    def load_module(self, name):
        module = self.create_module(_ModuleSpec(name, self))
        _sys.modules[name] = module
        return module
# Install hooks into python importlib
_sys.meta_path.append(_JImportLoader())
# %% Domains
# Mapping of python top-level module name -> java package prefix
_JDOMAINS = {}
def registerDomain(mod, alias=None):
    """ Add a java domain to python as a dynamic module.

    Args:
        mod is the name of the dynamic module
        alias is the name of the java path. (optional)
    """
    _JDOMAINS[mod] = alias if alias else mod
# Preregister common top level domains
registerDomain('com')
registerDomain('gov')
registerDomain('java')
registerDomain('org')
# %% Initialize
def _initialize():
    """Late-bound setup executed once the JVM has started (via _jinit)."""
    global _exportTypes
    global _modifier
    # Discover the bound-method type of a java method by example.
    _JMethod = type(_JClass('java.lang.Class').forName)
    _modifier = _JClass('java.lang.reflect.Modifier')
    # Types that dynamic java modules will expose through __all__.
    _exportTypes = (property, _JavaClass, _JImport, _JMethod)
_jinit(_initialize)
|
jpype-project/jpype
|
jpype/imports.py
|
_JModule
|
python
|
def _JModule(spec, javaname):
    """(internal) Front end for creating a java module dynamically."""
    module_class = _JImportFactory(spec, javaname)
    return module_class(spec.name)
|
(internal) Front end for creating a java module dynamically
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/imports.py#L243-L247
|
[
"def _JImportFactory(spec, javaname, cls=_JImport):\n \"\"\" (internal) Factory for creating java modules dynamically.\n\n This is needed to create a new type node to hold static methods.\n \"\"\"\n def init(self, name):\n # Call the base class\n cls.__init__(self, name)\n\n def getall(self):\n global _exportTypes\n d1 = self.__dict__.items()\n d2 = self.__class__.__dict__.items()\n local = [name for name, attr in d1 if not name.startswith('_')\n and isinstance(attr, _exportTypes)]\n glob = [name for name, attr in d2 if not name.startswith('_')\n and isinstance(attr, _exportTypes)]\n local.extend(glob)\n return local\n\n # Set up a new class for this type\n bases = [cls]\n members = {\n \"__init__\": init,\n \"__javaname__\": javaname,\n \"__name__\": spec.name,\n \"__all__\": property(getall),\n \"__spec__\": spec,\n }\n\n # Is this module also a class, if so insert class info\n jclass = _getJavaClass(javaname)\n if jclass:\n # Mark this as a class (will cause children to be inner classes)\n members['__javaclass__'] = jclass\n\n # Exposed static members as part of the module\n _copyProperties(members, jclass.__metaclass__)\n _copyStaticMethods(members, jclass)\n\n return type(\"module.\" + spec.name, tuple(bases), members)\n"
] |
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright 2017 Karl Einar Nelson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *****************************************************************************
# Optional jpype module to support:
# import <java_pkg> [ as <name> ]
# import <java_pkg>.<java_class> [ as <name> ]
# from <java_pkg> import <java_class>[,<java_class>*]
# from <java_pkg> import <java_class> [ as <name> ]
# from <java_pkg>.<java_class> import <java_static> [ as <name> ]
# from <java_pkg>.<java_class> import <java_inner> [ as <name> ]
#
# jpype.imports.registerDomain(moduleName, alias=<java_pkg>)
# jpype.imports.registerImportCustomizer(JImportCustomizer)
#
# Requires Python 3.6 or later
# Usage:
# import jpype
# import jpype.imports
# <start or attach jvm>
# # Import java packages as modules
# from java.lang import String
try:
    from importlib.machinery import ModuleSpec as _ModuleSpec
    from types import ModuleType as _ModuleType
except Exception:
    # For Python2 compatibility
    # (Note: customizers are not supported)
    class _ModuleSpec(object):
        # Minimal stand-in for importlib.machinery.ModuleSpec: just the
        # two attributes the loader below actually reads.
        def __init__(self, name, loader):
            self.name = name
            self.loader = loader
    _ModuleType = object
import _jpype
import sys as _sys
import keyword as _keyword
from ._jclass import JClass as _JClass
from ._jclass import _JavaClass as _JavaClass
from ._core import registerJVMInitializer as _jinit
__all__ = ["registerImportCustomizer", "registerDomain", "JImportCustomizer"]
_exportTypes = ()
_modifier = None
# %% Utility
def _keywordUnwrap(name):
if not name.endswith('_'):
return name
if _keyword.iskeyword(name[:-1]):
return name[:-1]
return name
def _keywordWrap(name):
if name in _keyword.kwlist:
return name + "_"
return name
def _getJavaClass(javaname):
    """Resolve *javaname* to a java class wrapper, or None if it does not exist."""
    try:
        resolved = _JClass(javaname)
    except Exception:
        return None
    return resolved
def _copyProperties(out, mc):
for v in dir(mc):
# Skip private members
if v.startswith('_'):
continue
# Copy properties
attr = getattr(mc, v)
if isinstance(attr, property):
out[v] = attr
def _getStaticMethods(cls):
    """Map python-safe name -> accessor for each static java method of *cls*.

    Uses java reflection (cls.class_.getMethods) and the module-level
    _modifier set up by _initialize once the JVM is running.
    """
    global _modifier
    table = {}
    for method in cls.class_.getMethods():
        if _modifier.isStatic(method.getModifiers()):
            safe = _keywordWrap(method.getName())
            table[safe] = getattr(cls, safe)
    return table
def _copyStaticMethods(out, cls):
    """Merge the static-method table of *cls* into the dict *out*."""
    out.update(_getStaticMethods(cls))
# %% Customizer

# Registered JImportCustomizer instances, consulted by the finder before
# falling back to a plain java module import.
_CUSTOMIZERS = []

if _sys.version_info > (3,):
    def registerImportCustomizer(customizer):
        """ Import customizers can be used to import python packages
        into java modules automatically.
        """
        _CUSTOMIZERS.append(customizer)

    # Support hook for placing other things into the java tree
    class JImportCustomizer(object):
        """ Base class for Import customizer.

        Import customizers should implement canCustomize and getSpec.

        Example:
           | # Site packages for each java package are stored under $DEVEL/<java_pkg>/py
           | class SiteCustomizer(jpype.imports.JImportCustomizer):
           |     def canCustomize(self, name):
           |         if name.startswith('org.mysite') and name.endswith('.py'):
           |             return True
           |         return False
           |     def getSpec(self, name):
           |         pname = name[:-3]
           |         devel = os.environ.get('DEVEL')
           |         path = os.path.join(devel, pname, 'py', '__init__.py')
           |         return importlib.util.spec_from_file_location(name, path)
        """

        def canCustomize(self, name):
            """ Determine if this path is to be treated differently.

            Return:
                True if an alternative spec is required.
            """
            return False

        def getSpec(self, name):
            """ Get the module spec for this module. """
            raise NotImplementedError
else:
    # Customizers rely on importlib.machinery, which Python 2 lacks.
    def registerImportCustomizer(customizer):
        raise NotImplementedError("Import customizers not implemented for Python 2.x")
    JImportCustomizer = object
# %% Import
class _JImport(object):
    """ (internal) Base class for import java modules """

    # Module-protocol attributes so instances can live in sys.modules.
    __doc__ = None
    __loader__ = None
    __path__ = []
    __package__ = "java"

    def __init__(self, name):
        pass

    def __getattr__(self, name):
        # Private/dunder names go through the normal lookup path untouched.
        if name.startswith('_'):
            return object.__getattribute__(self, name)
        name = _keywordUnwrap(name)
        # Inner class support: when this node is itself a java class,
        # children are inner classes ('$'-joined); otherwise they are
        # sub-packages/classes ('.'-joined).
        jname = object.__getattribute__(self, '__javaname__')
        try:
            object.__getattribute__(self, '__javaclass__')
            jname = "$".join([jname, name])
        except AttributeError:
            jname = ".".join([jname, name])
        # Get the class (if it exists)
        jtype = _getJavaClass(jname)
        if jtype:
            # Cache it for later (subsequent lookups bypass __getattr__).
            object.__setattr__(self, name, jtype)
            return jtype
        # If the java class does not exist, throw a ClassNotFound exception
        # NOTE(review): this is a plain Exception, not AttributeError, so
        # hasattr()/getattr() probes on these modules will propagate it.
        raise Exception("Unable to find java class " + jname)

    def __setattr__(self, name, value):
        # Dunder attributes are frozen after class creation.
        if name.startswith('__'):
            raise AttributeError("Module does not allow setting of %s" % name)
        # Anything carrying a java class is stored as that class.
        if hasattr(value, '__javaclass__'):
            return object.__setattr__(self, name, getattr(value, '__javaclass__'))
        # Otherwise only other java modules or python modules are accepted.
        if isinstance(value, (_JImport, _ModuleType)):
            return object.__setattr__(self, name, value)
        raise AttributeError("JImport may not set attribute %s" % name)
# In order to get properties to be attached to the _JImport class,
# we must create a dynamic class between
def _JImportFactory(spec, javaname, cls=_JImport):
    """ (internal) Factory for creating java modules dynamically.

    This is needed to create a new type node to hold static methods.
    """
    def init(self, name):
        # Call the base class
        cls.__init__(self, name)

    def getall(self):
        # Backing for the __all__ property: every public member (instance
        # and class level) whose type marks it as exportable.
        global _exportTypes
        d1 = self.__dict__.items()
        d2 = self.__class__.__dict__.items()
        local = [name for name, attr in d1 if not name.startswith('_')
                 and isinstance(attr, _exportTypes)]
        glob = [name for name, attr in d2 if not name.startswith('_')
                and isinstance(attr, _exportTypes)]
        local.extend(glob)
        return local

    # Set up a new class for this type
    bases = [cls]
    members = {
        "__init__": init,
        "__javaname__": javaname,
        "__name__": spec.name,
        "__all__": property(getall),
        "__spec__": spec,
    }

    # Is this module also a class, if so insert class info
    jclass = _getJavaClass(javaname)
    if jclass:
        # Mark this as a class (will cause children to be inner classes)
        members['__javaclass__'] = jclass

        # Exposed static members as part of the module
        _copyProperties(members, jclass.__metaclass__)
        _copyStaticMethods(members, jclass)

    return type("module." + spec.name, tuple(bases), members)
# %% Finder
class _JImportLoader:
    """ (internal) Finder and loader hooks for importlib.

    One instance serves as both the meta-path finder and the loader for
    import names rooted in a registered java domain.
    """

    # -- Python 3 finder protocol --
    def find_spec(self, name, path, target):
        parts = name.split('.', 1)
        if not parts[0] in _JDOMAINS:
            return None
        # Support for external modules in java tree
        for customizer in _CUSTOMIZERS:
            if customizer.canCustomize(name):
                return customizer.getSpec(name)
        # Import the java module
        return _ModuleSpec(name, self)

    # -- Python 3 loader protocol --
    def create_module(self, spec):
        if not _jpype.isStarted():
            raise ImportError("Attempt to create java modules without jvm")
        # Handle creating the java name based on the path
        parts = spec.name.split('.')
        if len(parts) == 1:
            return _JModule(spec, _JDOMAINS[spec.name])
        # Use the parent module to simplify name mangling
        base = _sys.modules[".".join(parts[:-1])]
        # If the parent is not a java module, the target is an attribute of it.
        if not isinstance(base, _JImport):
            return getattr(base, parts[-1])
        jbasename = object.__getattribute__(base, '__javaname__')
        try:
            # BUG FIX: was `object.__getattribute(base, ...)` (missing the
            # trailing underscores), so this existence probe always raised
            # AttributeError and inner classes were never '$'-joined.
            object.__getattribute__(base, '__javaclass__')
            javaname = "$".join([jbasename, _keywordUnwrap(parts[-1])])
        except AttributeError:
            javaname = ".".join([jbasename, _keywordUnwrap(parts[-1])])
        return _JModule(spec, javaname)

    def exec_module(self, fullname):
        # No-op: the module object is fully populated in create_module.
        pass

    # For compatibility with Python 2.7
    def find_module(self, name, path=None):
        parts = name.split('.', 1)
        if not parts[0] in _JDOMAINS:
            return None
        return self

    # For compatibility with Python 2.7
    def load_module(self, name):
        module = self.create_module(_ModuleSpec(name, self))
        _sys.modules[name] = module
        return module
# Install hooks into python importlib
_sys.meta_path.append(_JImportLoader())

# %% Domains

# Map of python module name -> java package name claimed by the finder.
_JDOMAINS = {}
def registerDomain(mod, alias=None):
    """ Add a java domain to python as a dynamic module.

    Args:
        mod is the name of the dynamic module
        alias is the name of the java path. (optional)
    """
    _JDOMAINS[mod] = alias or mod
# Preregister common top level domains so `import com...`, `import java...`
# etc. work out of the box without an explicit registerDomain call.
registerDomain('com')
registerDomain('gov')
registerDomain('java')
registerDomain('org')
# %% Initialize


def _initialize():
    # Deferred setup: these JClass lookups require a running JVM, so this
    # is registered below to run at JVM start rather than at import time.
    global _exportTypes
    global _modifier
    # Type of a bound java method wrapper, used to recognise exportable members.
    _JMethod = type(_JClass('java.lang.Class').forName)
    _modifier = _JClass('java.lang.reflect.Modifier')
    _exportTypes = (property, _JavaClass, _JImport, _JMethod)


# Run _initialize automatically when the JVM starts.
_jinit(_initialize)
|
jpype-project/jpype
|
jpype/_properties.py
|
_extract_accessor_pairs
|
python
|
def _extract_accessor_pairs(members):
    """Collect java bean accessor pairs from a class's members.

    Returns a dict mapping property name -> [getter, setter]; either slot
    may be None when only one accessor exists. A property shadowed by a
    public java method of the same name is skipped entirely.
    """
    pairs = {}
    prefix_len = _PROPERTY_ACCESSOR_PREFIX_LEN
    for name, member in members.items():
        # Only java methods longer than the 'get'/'set' prefix qualify.
        if len(name) <= prefix_len or not _is_java_method(member):
            continue
        access = name[:prefix_len]
        rest = name[prefix_len:]
        prop = rest[:1].lower() + rest[1:]
        # A public java method with the property's own name wins.
        if prop in members and _is_java_method(members[prop]):
            continue
        if access == 'get' and member.isBeanAccessor():
            pairs.setdefault(prop, [None, None])[0] = member
        elif access == 'set' and member.isBeanMutator():
            pairs.setdefault(prop, [None, None])[1] = member
    return pairs
|
Extract pairs of corresponding property access methods
(getter and setter) from a Java class's members (attributes).
If a public method with a property's name exists no pair for
that property will be extracted.
Returns a dictionary with the property name as key and a tuple
of (getter method, setter method) as value. A tuple element
value might be `None` if only a getter or only a setter
exists.
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_properties.py#L26-L62
|
[
"def _is_java_method(attribute):\n return isinstance(attribute, _jpype._JavaMethod)\n"
] |
#*****************************************************************************
# Copyright 2004-2008 Steve Menard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*****************************************************************************
import _jpype
from . import _jclass
from ._pykeywords import KEYWORDS
_PROPERTY_ACCESSOR_PREFIX_LEN = 3
def _initialize() :
    # Hook the bean-property customizer into jpype's class construction.
    _jclass.registerClassCustomizer(PropertiesCustomizer())
def _is_java_method(attribute):
    """Return True when *attribute* is a wrapped java method."""
    method_type = _jpype._JavaMethod
    return isinstance(attribute, method_type)
class PropertiesCustomizer(object) :
    # Class customizer that turns java bean accessor pairs (getX/setX)
    # into python `property` attributes on the generated wrapper class.

    def canCustomize(self, name, jc) :
        # Applies to every java class.
        return True

    def customize(self, class_name, jc, bases, members) :
        accessor_pairs = _extract_accessor_pairs(members)
        for attr_name, (getter, setter) in accessor_pairs.items():
            # The 'class' attribute is left alone to match Type.class in Java.
            if attr_name=='class':
                continue
            # Handle keyword conflicts by appending an underscore.
            if attr_name in KEYWORDS:
                attr_name += "_"
            if attr_name in members:
                if not getter:
                    # add default getter if we
                    # only have a setter
                    getter = members[attr_name].fget
                elif not setter:
                    # add default setter if we
                    # only have a getter
                    setter = members[attr_name].fset
            members[attr_name] = property(getter, setter)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.