repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/message_request.py
|
GetReadGroupChatUserIdsRequest.get_read_user_ids
|
python
|
def get_read_user_ids(self):
"""Method to get chatid of group created."""
read_user_ids = self.json_response.get("readUserIdList", None)
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return read_user_ids
|
Method to get chatid of group created.
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/message_request.py#L192-L196
| null |
class GetReadGroupChatUserIdsRequest(BaseRequest):
"""
Description: The GetReadGroupChatUserIdsRequest aims to get the user id
list of whom has read message by messageId
parameter_R: <access_token>, <messageId>, <cursor>, <size>
parameter_O: None
post_data_R: <chatid>, <msg>
post_data_O: None
Return: the user id list which has read the message
doc_links: https://open-doc.dingtalk.com/microapp/serverapi2/isu6nk
"""
request_url = settings.GET_READ_USERS
def get_read_user_ids(self):
"""Method to get chatid of group created."""
read_user_ids = self.json_response.get("readUserIdList", None)
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return read_user_ids
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/user_request.py
|
DeptUserRequest.get_userinfo
|
python
|
def get_userinfo(self):
"""Method to get current user's name, mobile, email and position."""
wanted_fields = ["name", "mobile", "orgEmail", "position", "avatar"]
userinfo = {k: self.json_response.get(k, None) for k in wanted_fields}
return userinfo
|
Method to get current user's name, mobile, email and position.
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/user_request.py#L30-L34
| null |
class DeptUserRequest(BaseRequest):
"""
Description: The response of DeptUserRequest contains a department member's
detail information specified by userid
parameter_R: <access_token>, <userid>
parameter_O: None
post_data_R: None
post_data_O: None
Return: a department member's detail information specified by userid
doc_links: https://open-doc.dingtalk.com/microapp/serverapi2/ege851
"""
request_url = settings.GET_DEPT_USER
def get_unionid(self):
"""Method to get the unionid"""
return self.json_response.get("unionid", None)
def get_openid(self):
"""Method to get the openid"""
return self.json_response.get("openid", None)
def get_userinfo(self):
"""Method to get current user's name, mobile, email and position."""
wanted_fields = ["name", "mobile", "orgEmail", "position", "avatar"]
userinfo = {k: self.json_response.get(k, None) for k in wanted_fields}
return userinfo
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/user_request.py
|
AdminUsersRequest.get_admin_ids
|
python
|
def get_admin_ids(self):
"""Method to get the administrator id list."""
admins = self.json_response.get("admin_list", None)
admin_ids = [admin_id for admin_id in admins["userid"]]
return admin_ids
|
Method to get the administrator id list.
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/user_request.py#L54-L58
| null |
class AdminUsersRequest(BaseRequest):
"""
Description: The response of AdminUsersRequest contains all admin user's id
list
parameter_R: <access_token>
parameter_O: None
post_data_R: None
post_data_O: None
Return: admin user's id list
doc_links: https://open-doc.dingtalk.com/microapp/serverapi2/ege851
"""
request_url = settings.GET_ADMIN_USERS
def get_admin_ids(self):
"""Method to get the administrator id list."""
admins = self.json_response.get("admin_list", None)
admin_ids = [admin_id for admin_id in admins["userid"]]
return admin_ids
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/department_request.py
|
DeptRequest.get_dept_name
|
python
|
def get_dept_name(self):
"""Method to get the department name"""
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return self.json_response.get("name", None)
|
Method to get the department name
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/department_request.py#L22-L25
| null |
class DeptRequest(BaseRequest):
"""
Description: The response of DeptRequest contains detail of specified
department by parameter <id>
parameter_R: <access_token>, <id> (department_id)
parameter_O: None
post_data_R: None
post_data_O: None
Return: all information fields for specified department
doc_links: https://open-doc.dingtalk.com/microapp/serverapi2/dubakq
"""
request_url = settings.GET_DEPT
def get_dept_name(self):
"""Method to get the department name"""
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return self.json_response.get("name", None)
def get_dept_manager_ids(self):
"""Method to get the id list of department manager."""
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return self.json_response.get("deptManagerUseridList", None)
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/department_request.py
|
DeptRequest.get_dept_manager_ids
|
python
|
def get_dept_manager_ids(self):
"""Method to get the id list of department manager."""
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return self.json_response.get("deptManagerUseridList", None)
|
Method to get the id list of department manager.
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/department_request.py#L27-L30
| null |
class DeptRequest(BaseRequest):
"""
Description: The response of DeptRequest contains detail of specified
department by parameter <id>
parameter_R: <access_token>, <id> (department_id)
parameter_O: None
post_data_R: None
post_data_O: None
Return: all information fields for specified department
doc_links: https://open-doc.dingtalk.com/microapp/serverapi2/dubakq
"""
request_url = settings.GET_DEPT
def get_dept_name(self):
"""Method to get the department name"""
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return self.json_response.get("name", None)
def get_dept_manager_ids(self):
"""Method to get the id list of department manager."""
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return self.json_response.get("deptManagerUseridList", None)
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/department_request.py
|
DeptsRequest.get_depts
|
python
|
def get_depts(self, dept_name=None):
"""Method to get department by name."""
depts = self.json_response.get("department", None)
params = self.kwargs.get("params", None)
fetch_child = params.get("fetch_child", True) if params else True
if dept_name is not None:
depts = [dept for dept in depts if dept["name"] == dept_name]
depts = [{"id": dept["id"], "name": dept["name"]} for dept in depts]
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return depts if fetch_child else depts[0]
|
Method to get department by name.
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/department_request.py#L52-L61
| null |
class DeptsRequest(BaseRequest):
"""
Description: The response of DeptsRequest contains a department list, but
each element just contain the main fields of id, name, parentid,
createDeptGroup and autoAddUser. fetch_child (Bool, default is True,
it decides whether recursively response the sub-department)
parameter_R: <access_token>, <id> (department_id)
parameter_O: <fetch_child>
post_data_R: None
post_data_O: None
Return: a department list briefed
doc_links: https://open-doc.dingtalk.com/microapp/serverapi2/dubakq
"""
request_url = settings.GET_DEPTS
def get_depts(self, dept_name=None):
"""Method to get department by name."""
depts = self.json_response.get("department", None)
params = self.kwargs.get("params", None)
fetch_child = params.get("fetch_child", True) if params else True
if dept_name is not None:
depts = [dept for dept in depts if dept["name"] == dept_name]
depts = [{"id": dept["id"], "name": dept["name"]} for dept in depts]
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return depts if fetch_child else depts[0]
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/department_request.py
|
SubDeptIdsRequest.get_sub_dept_ids
|
python
|
def get_sub_dept_ids(self):
"""Method to get the department list"""
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return self.json_response.get("sub_dept_id_list", None)
|
Method to get the department list
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/department_request.py#L81-L84
| null |
class SubDeptIdsRequest(BaseRequest):
"""
Description: The response of SubDeptIdsRequest contains the id list of sub
department for specified department by id
parameter_R: <access_token>, <id> (department_id)
parameter_O: None
post_data_R: None
post_data_O: None
Return: the id list of sub department
doc_links: https://open-doc.dingtalk.com/microapp/serverapi2/dubakq
"""
request_url = settings.GET_SUB_DEPT_IDS
def get_sub_dept_ids(self):
"""Method to get the department list"""
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return self.json_response.get("sub_dept_id_list", None)
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/department_request.py
|
ParentDeptPathRequest.get_parent_dept_path
|
python
|
def get_parent_dept_path(self):
"""Method to get the department list"""
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return self.json_response.get("parentIds", None)
|
Method to get the department list
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/department_request.py#L105-L108
| null |
class ParentDeptPathRequest(BaseRequest):
"""
Description: The response of ParentDeptPathRequest aimed at showing the
department tree reversely, the path, i.e, find the path from the initial
leaf node to the root node.
parameter_R: <access_token>, <id> (department_id)
parameter_O: None
post_data_R: None
post_data_O: None
Return: the path to root node(id)
doc_links: https://open-doc.dingtalk.com/microapp/serverapi2/dubakq
"""
request_url = settings.GET_PARENT_DEPT_PATH
def get_parent_dept_path(self):
"""Method to get the department list"""
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return self.json_response.get("parentIds", None)
|
praekeltfoundation/seed-message-sender
|
message_sender/factory.py
|
GenericHttpApiSender._get_filename
|
python
|
def _get_filename(self, path):
match = re.search("[a-z]{2,3}_[A-Z]{2}", path)
if match:
start = match.start(0)
filename = path[start:]
else:
filename = os.path.basename(path)
return filename
|
This function gets the base filename from the path, if a language code
is present the filename will start from there.
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/factory.py#L72-L85
| null |
class GenericHttpApiSender(VumiHttpApiSender):
def __init__(
self,
url,
auth=None,
from_addr=None,
session=None,
override_payload=None,
strip_filepath=False,
):
"""
:param url str: The URL for the HTTP API channel
:param auth tuple: (username, password) or anything
accepted by the requests library. Defaults to None.
:param session requests.Session: A requests session. Defaults to None
:param from_addr str: The from address for all messages. Defaults to
None
:param override_payload dict: This is the format of the payload that
needs to be sent to the URL. It willl be populated from the
original payload. Defaults to None
:param strip_filepath boolean: This should be true if we only need to
send the filename to the api.
"""
self.api_url = url
self.auth = tuple(auth) if isinstance(auth, list) else auth
self.from_addr = from_addr
if session is None:
session = requests.Session()
self.session = session
self.override_payload = override_payload
self.strip_filepath = strip_filepath
def _raw_send(self, py_data):
headers = {"content-type": "application/json; charset=utf-8"}
channel_data = py_data.get("helper_metadata", {})
channel_data["session_event"] = py_data.get("session_event")
url = channel_data.get("voice", {}).get("speech_url")
if self.strip_filepath and url:
if not isinstance(url, (list, tuple)):
channel_data["voice"]["speech_url"] = self._get_filename(url)
else:
channel_data["voice"]["speech_url"] = []
for item in url:
channel_data["voice"]["speech_url"].append(self._get_filename(item))
data = {
"to": py_data["to_addr"],
"from": self.from_addr,
"content": py_data["content"],
"channel_data": channel_data,
}
data = self._override_payload(data)
data = json.dumps(data)
r = self.session.post(
self.api_url,
auth=self.auth,
data=data,
headers=headers,
timeout=settings.DEFAULT_REQUEST_TIMEOUT,
)
r.raise_for_status()
res = r.json()
return res.get("result", {})
def _override_payload(self, payload):
"""
This function transforms the payload into a new format using the
self.override_payload property.
"""
if self.override_payload:
old_payload = payload
def get_value(data, key):
try:
parent_key, nested_key = key.split(".", 1)
return get_value(data.get(parent_key, {}), nested_key)
except ValueError:
return data.get(key, key)
def set_values(data):
for key, value in data.items():
if isinstance(value, dict):
set_values(value)
else:
data[key] = get_value(old_payload, value)
payload = deepcopy(self.override_payload)
set_values(payload)
return payload
def fire_metric(self, metric, value, agg="last"):
raise HttpApiSenderException("Metrics sending not supported")
|
praekeltfoundation/seed-message-sender
|
message_sender/factory.py
|
GenericHttpApiSender._override_payload
|
python
|
def _override_payload(self, payload):
if self.override_payload:
old_payload = payload
def get_value(data, key):
try:
parent_key, nested_key = key.split(".", 1)
return get_value(data.get(parent_key, {}), nested_key)
except ValueError:
return data.get(key, key)
def set_values(data):
for key, value in data.items():
if isinstance(value, dict):
set_values(value)
else:
data[key] = get_value(old_payload, value)
payload = deepcopy(self.override_payload)
set_values(payload)
return payload
|
This function transforms the payload into a new format using the
self.override_payload property.
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/factory.py#L123-L148
| null |
class GenericHttpApiSender(VumiHttpApiSender):
def __init__(
self,
url,
auth=None,
from_addr=None,
session=None,
override_payload=None,
strip_filepath=False,
):
"""
:param url str: The URL for the HTTP API channel
:param auth tuple: (username, password) or anything
accepted by the requests library. Defaults to None.
:param session requests.Session: A requests session. Defaults to None
:param from_addr str: The from address for all messages. Defaults to
None
:param override_payload dict: This is the format of the payload that
needs to be sent to the URL. It willl be populated from the
original payload. Defaults to None
:param strip_filepath boolean: This should be true if we only need to
send the filename to the api.
"""
self.api_url = url
self.auth = tuple(auth) if isinstance(auth, list) else auth
self.from_addr = from_addr
if session is None:
session = requests.Session()
self.session = session
self.override_payload = override_payload
self.strip_filepath = strip_filepath
def _get_filename(self, path):
"""
This function gets the base filename from the path, if a language code
is present the filename will start from there.
"""
match = re.search("[a-z]{2,3}_[A-Z]{2}", path)
if match:
start = match.start(0)
filename = path[start:]
else:
filename = os.path.basename(path)
return filename
def _raw_send(self, py_data):
headers = {"content-type": "application/json; charset=utf-8"}
channel_data = py_data.get("helper_metadata", {})
channel_data["session_event"] = py_data.get("session_event")
url = channel_data.get("voice", {}).get("speech_url")
if self.strip_filepath and url:
if not isinstance(url, (list, tuple)):
channel_data["voice"]["speech_url"] = self._get_filename(url)
else:
channel_data["voice"]["speech_url"] = []
for item in url:
channel_data["voice"]["speech_url"].append(self._get_filename(item))
data = {
"to": py_data["to_addr"],
"from": self.from_addr,
"content": py_data["content"],
"channel_data": channel_data,
}
data = self._override_payload(data)
data = json.dumps(data)
r = self.session.post(
self.api_url,
auth=self.auth,
data=data,
headers=headers,
timeout=settings.DEFAULT_REQUEST_TIMEOUT,
)
r.raise_for_status()
res = r.json()
return res.get("result", {})
def fire_metric(self, metric, value, agg="last"):
raise HttpApiSenderException("Metrics sending not supported")
|
praekeltfoundation/seed-message-sender
|
message_sender/factory.py
|
WhatsAppApiSender.fire_failed_contact_lookup
|
python
|
def fire_failed_contact_lookup(self, msisdn):
payload = {"address": msisdn}
# We cannot user the raw_hook_event here, because we don't have a user, so we
# manually filter and send the hooks for all users
hooks = Hook.objects.filter(event="whatsapp.failed_contact_check")
for hook in hooks:
hook.deliver_hook(
None, payload_override={"hook": hook.dict(), "data": payload}
)
|
Fires a webhook in the event of a failed WhatsApp contact lookup.
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/factory.py#L321-L332
| null |
class WhatsAppApiSender(object):
def __init__(
self, api_url, token, hsm_namespace, hsm_element_name, ttl, session=None
):
self.api_url = api_url
self.token = token
self.hsm_namespace = hsm_namespace
self.hsm_element_name = hsm_element_name
self.ttl = ttl
distribution = pkg_resources.get_distribution("seed_message_sender")
# reuse sessions on tokens to make use of SSL keep-alive
# but keep some separation around auth
self.session = session or WHATSAPP_SESSIONS.setdefault(
token, requests.Session()
)
self.session.headers.update(
{
"Authorization": "Bearer %s" % (self.token,),
"User-Agent": "SeedMessageSender/%s" % (distribution.version,),
}
)
def get_contact(self, msisdn):
"""
Returns the WhatsApp ID for the given MSISDN
"""
response = self.session.post(
urllib_parse.urljoin(self.api_url, "/v1/contacts"),
json={"blocking": "wait", "contacts": [msisdn]},
)
response.raise_for_status()
whatsapp_id = response.json()["contacts"][0].get("wa_id")
if not whatsapp_id:
self.fire_failed_contact_lookup(msisdn)
return whatsapp_id
def send_hsm(self, whatsapp_id, content):
data = {
"to": whatsapp_id,
"type": "hsm",
"hsm": {
"namespace": self.hsm_namespace,
"element_name": self.hsm_element_name,
"localizable_params": [{"default": content}],
},
}
if self.ttl is not None:
data["ttl"] = self.ttl
response = self.session.post(
urllib_parse.urljoin(self.api_url, "/v1/messages"), json=data
)
return self.return_response(response)
def send_custom_hsm(self, whatsapp_id, template_name, language, variables):
"""
Sends an HSM with more customizable fields than the send_hsm function
"""
data = {
"to": whatsapp_id,
"type": "hsm",
"hsm": {
"namespace": self.hsm_namespace,
"element_name": template_name,
"language": {"policy": "deterministic", "code": language},
"localizable_params": [{"default": variable} for variable in variables],
},
}
if self.ttl is not None:
data["ttl"] = self.ttl
response = self.session.post(
urllib_parse.urljoin(self.api_url, "/v1/messages"), json=data
)
return self.return_response(response)
def send_text_message(self, whatsapp_id, content):
response = self.session.post(
urllib_parse.urljoin(self.api_url, "/v1/messages"),
json={"to": whatsapp_id, "text": {"body": content}},
)
return self.return_response(response)
def return_response(self, response):
try:
response.raise_for_status()
except requests_exceptions.HTTPError as exc:
resp = exc.response.text
if not ("1006" in resp and "unknown contact" in resp):
raise exc
return response.json()
def send_text(self, to_addr, content, session_event=None, metadata=None):
whatsapp_id = to_addr.replace("+", "")
def send_message():
if metadata and "template" in metadata:
template = metadata["template"]
d = self.send_custom_hsm(
whatsapp_id,
template["name"],
template["language"],
template["variables"],
)
elif self.hsm_namespace and self.hsm_element_name:
d = self.send_hsm(whatsapp_id, content)
else:
d = self.send_text_message(whatsapp_id, content)
return d
data = send_message()
if (
"errors" in data
and data["errors"][0]["code"] == 1006
and data["errors"][0]["details"] == "unknown contact"
):
whatsapp_id = self.get_contact(to_addr)
if not whatsapp_id:
return {"message_id": None}
data = send_message()
return {"message_id": data["messages"][0]["id"]}
def send_image(self, to_addr, content, image_url=None):
raise WhatsAppApiSenderException("Image sending not supported")
def send_voice(
self, to_addr, content, speech_url=None, wait_for=None, session_event=None
):
raise WhatsAppApiSenderException("Voice sending not supported")
def fire_metric(self, metric, value, agg="last"):
raise WhatsAppApiSenderException("Metrics sending not supported")
|
praekeltfoundation/seed-message-sender
|
message_sender/factory.py
|
WhatsAppApiSender.get_contact
|
python
|
def get_contact(self, msisdn):
response = self.session.post(
urllib_parse.urljoin(self.api_url, "/v1/contacts"),
json={"blocking": "wait", "contacts": [msisdn]},
)
response.raise_for_status()
whatsapp_id = response.json()["contacts"][0].get("wa_id")
if not whatsapp_id:
self.fire_failed_contact_lookup(msisdn)
return whatsapp_id
|
Returns the WhatsApp ID for the given MSISDN
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/factory.py#L334-L346
|
[
"def fire_failed_contact_lookup(self, msisdn):\n \"\"\"\n Fires a webhook in the event of a failed WhatsApp contact lookup.\n \"\"\"\n payload = {\"address\": msisdn}\n # We cannot user the raw_hook_event here, because we don't have a user, so we\n # manually filter and send the hooks for all users\n hooks = Hook.objects.filter(event=\"whatsapp.failed_contact_check\")\n for hook in hooks:\n hook.deliver_hook(\n None, payload_override={\"hook\": hook.dict(), \"data\": payload}\n )\n"
] |
class WhatsAppApiSender(object):
def __init__(
self, api_url, token, hsm_namespace, hsm_element_name, ttl, session=None
):
self.api_url = api_url
self.token = token
self.hsm_namespace = hsm_namespace
self.hsm_element_name = hsm_element_name
self.ttl = ttl
distribution = pkg_resources.get_distribution("seed_message_sender")
# reuse sessions on tokens to make use of SSL keep-alive
# but keep some separation around auth
self.session = session or WHATSAPP_SESSIONS.setdefault(
token, requests.Session()
)
self.session.headers.update(
{
"Authorization": "Bearer %s" % (self.token,),
"User-Agent": "SeedMessageSender/%s" % (distribution.version,),
}
)
def fire_failed_contact_lookup(self, msisdn):
"""
Fires a webhook in the event of a failed WhatsApp contact lookup.
"""
payload = {"address": msisdn}
# We cannot user the raw_hook_event here, because we don't have a user, so we
# manually filter and send the hooks for all users
hooks = Hook.objects.filter(event="whatsapp.failed_contact_check")
for hook in hooks:
hook.deliver_hook(
None, payload_override={"hook": hook.dict(), "data": payload}
)
def send_hsm(self, whatsapp_id, content):
data = {
"to": whatsapp_id,
"type": "hsm",
"hsm": {
"namespace": self.hsm_namespace,
"element_name": self.hsm_element_name,
"localizable_params": [{"default": content}],
},
}
if self.ttl is not None:
data["ttl"] = self.ttl
response = self.session.post(
urllib_parse.urljoin(self.api_url, "/v1/messages"), json=data
)
return self.return_response(response)
def send_custom_hsm(self, whatsapp_id, template_name, language, variables):
"""
Sends an HSM with more customizable fields than the send_hsm function
"""
data = {
"to": whatsapp_id,
"type": "hsm",
"hsm": {
"namespace": self.hsm_namespace,
"element_name": template_name,
"language": {"policy": "deterministic", "code": language},
"localizable_params": [{"default": variable} for variable in variables],
},
}
if self.ttl is not None:
data["ttl"] = self.ttl
response = self.session.post(
urllib_parse.urljoin(self.api_url, "/v1/messages"), json=data
)
return self.return_response(response)
def send_text_message(self, whatsapp_id, content):
response = self.session.post(
urllib_parse.urljoin(self.api_url, "/v1/messages"),
json={"to": whatsapp_id, "text": {"body": content}},
)
return self.return_response(response)
def return_response(self, response):
try:
response.raise_for_status()
except requests_exceptions.HTTPError as exc:
resp = exc.response.text
if not ("1006" in resp and "unknown contact" in resp):
raise exc
return response.json()
def send_text(self, to_addr, content, session_event=None, metadata=None):
whatsapp_id = to_addr.replace("+", "")
def send_message():
if metadata and "template" in metadata:
template = metadata["template"]
d = self.send_custom_hsm(
whatsapp_id,
template["name"],
template["language"],
template["variables"],
)
elif self.hsm_namespace and self.hsm_element_name:
d = self.send_hsm(whatsapp_id, content)
else:
d = self.send_text_message(whatsapp_id, content)
return d
data = send_message()
if (
"errors" in data
and data["errors"][0]["code"] == 1006
and data["errors"][0]["details"] == "unknown contact"
):
whatsapp_id = self.get_contact(to_addr)
if not whatsapp_id:
return {"message_id": None}
data = send_message()
return {"message_id": data["messages"][0]["id"]}
def send_image(self, to_addr, content, image_url=None):
raise WhatsAppApiSenderException("Image sending not supported")
def send_voice(
self, to_addr, content, speech_url=None, wait_for=None, session_event=None
):
raise WhatsAppApiSenderException("Voice sending not supported")
def fire_metric(self, metric, value, agg="last"):
raise WhatsAppApiSenderException("Metrics sending not supported")
|
praekeltfoundation/seed-message-sender
|
message_sender/factory.py
|
WhatsAppApiSender.send_custom_hsm
|
python
|
def send_custom_hsm(self, whatsapp_id, template_name, language, variables):
data = {
"to": whatsapp_id,
"type": "hsm",
"hsm": {
"namespace": self.hsm_namespace,
"element_name": template_name,
"language": {"policy": "deterministic", "code": language},
"localizable_params": [{"default": variable} for variable in variables],
},
}
if self.ttl is not None:
data["ttl"] = self.ttl
response = self.session.post(
urllib_parse.urljoin(self.api_url, "/v1/messages"), json=data
)
return self.return_response(response)
|
Sends an HSM with more customizable fields than the send_hsm function
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/factory.py#L366-L386
|
[
"def return_response(self, response):\n try:\n response.raise_for_status()\n except requests_exceptions.HTTPError as exc:\n resp = exc.response.text\n\n if not (\"1006\" in resp and \"unknown contact\" in resp):\n raise exc\n\n return response.json()\n"
] |
class WhatsAppApiSender(object):
def __init__(
self, api_url, token, hsm_namespace, hsm_element_name, ttl, session=None
):
self.api_url = api_url
self.token = token
self.hsm_namespace = hsm_namespace
self.hsm_element_name = hsm_element_name
self.ttl = ttl
distribution = pkg_resources.get_distribution("seed_message_sender")
# reuse sessions on tokens to make use of SSL keep-alive
# but keep some separation around auth
self.session = session or WHATSAPP_SESSIONS.setdefault(
token, requests.Session()
)
self.session.headers.update(
{
"Authorization": "Bearer %s" % (self.token,),
"User-Agent": "SeedMessageSender/%s" % (distribution.version,),
}
)
def fire_failed_contact_lookup(self, msisdn):
"""
Fires a webhook in the event of a failed WhatsApp contact lookup.
"""
payload = {"address": msisdn}
# We cannot user the raw_hook_event here, because we don't have a user, so we
# manually filter and send the hooks for all users
hooks = Hook.objects.filter(event="whatsapp.failed_contact_check")
for hook in hooks:
hook.deliver_hook(
None, payload_override={"hook": hook.dict(), "data": payload}
)
def get_contact(self, msisdn):
"""
Returns the WhatsApp ID for the given MSISDN
"""
response = self.session.post(
urllib_parse.urljoin(self.api_url, "/v1/contacts"),
json={"blocking": "wait", "contacts": [msisdn]},
)
response.raise_for_status()
whatsapp_id = response.json()["contacts"][0].get("wa_id")
if not whatsapp_id:
self.fire_failed_contact_lookup(msisdn)
return whatsapp_id
def send_hsm(self, whatsapp_id, content):
data = {
"to": whatsapp_id,
"type": "hsm",
"hsm": {
"namespace": self.hsm_namespace,
"element_name": self.hsm_element_name,
"localizable_params": [{"default": content}],
},
}
if self.ttl is not None:
data["ttl"] = self.ttl
response = self.session.post(
urllib_parse.urljoin(self.api_url, "/v1/messages"), json=data
)
return self.return_response(response)
def send_text_message(self, whatsapp_id, content):
response = self.session.post(
urllib_parse.urljoin(self.api_url, "/v1/messages"),
json={"to": whatsapp_id, "text": {"body": content}},
)
return self.return_response(response)
def return_response(self, response):
try:
response.raise_for_status()
except requests_exceptions.HTTPError as exc:
resp = exc.response.text
if not ("1006" in resp and "unknown contact" in resp):
raise exc
return response.json()
def send_text(self, to_addr, content, session_event=None, metadata=None):
whatsapp_id = to_addr.replace("+", "")
def send_message():
if metadata and "template" in metadata:
template = metadata["template"]
d = self.send_custom_hsm(
whatsapp_id,
template["name"],
template["language"],
template["variables"],
)
elif self.hsm_namespace and self.hsm_element_name:
d = self.send_hsm(whatsapp_id, content)
else:
d = self.send_text_message(whatsapp_id, content)
return d
data = send_message()
if (
"errors" in data
and data["errors"][0]["code"] == 1006
and data["errors"][0]["details"] == "unknown contact"
):
whatsapp_id = self.get_contact(to_addr)
if not whatsapp_id:
return {"message_id": None}
data = send_message()
return {"message_id": data["messages"][0]["id"]}
def send_image(self, to_addr, content, image_url=None):
raise WhatsAppApiSenderException("Image sending not supported")
def send_voice(
self, to_addr, content, speech_url=None, wait_for=None, session_event=None
):
raise WhatsAppApiSenderException("Voice sending not supported")
def fire_metric(self, metric, value, agg="last"):
raise WhatsAppApiSenderException("Metrics sending not supported")
|
praekeltfoundation/seed-message-sender
|
message_sender/views.py
|
process_event
|
python
|
def process_event(message_id, event_type, event_detail, timestamp):
    """Apply a delivery-status event to the Outbound it refers to.

    Returns a (success, reason) tuple; success is False when no Outbound
    with the given vumi_message_id exists. nack/delivery_failed events
    free a concurrency slot and re-queue the send.
    """
    # Load message
    try:
        message = Outbound.objects.select_related("channel").get(
            vumi_message_id=message_id
        )
    except ObjectDoesNotExist:
        return (False, "Cannot find message for ID {}".format(message_id))

    if event_type == "ack":
        message.delivered = True
        # to_addr is blanked once the message is confirmed — presumably to
        # avoid retaining the address longer than needed; confirm intent.
        message.to_addr = ""
        message.metadata["ack_timestamp"] = timestamp
        message.metadata["ack_reason"] = event_detail
        message.save(update_fields=["delivered", "to_addr", "metadata"])
    elif event_type == "nack":
        message.metadata["nack_timestamp"] = timestamp
        message.metadata["nack_reason"] = event_detail
        message.save(update_fields=["metadata"])
        # A failed send releases its concurrency slot and is retried.
        decr_message_count(message)
        send_message.delay(str(message.id))
    elif event_type == "delivery_succeeded":
        message.delivered = True
        message.to_addr = ""
        message.metadata["delivery_timestamp"] = timestamp
        message.metadata["delivery_reason"] = event_detail
        message.save(update_fields=["delivered", "metadata", "to_addr"])
    elif event_type == "delivery_failed":
        message.metadata["delivery_failed_reason"] = event_detail
        message.metadata["delivery_failed_timestamp"] = timestamp
        message.save(update_fields=["metadata"])
        decr_message_count(message)
        send_message.delay(str(message.id))
    elif event_type == "read":
        message.delivered = True
        message.to_addr = ""
        message.metadata["read_timestamp"] = timestamp
        message.save(update_fields=["delivered", "to_addr", "metadata"])
    outbound_event_total.labels(event_type, message.channel_id).inc()
    fire_delivery_hook(message)
    return (True, "Event processed")
|
Processes an event of the given details, returning a (success, message) tuple
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/views.py#L324-L373
|
[
"def decr_message_count(message):\n if message.channel:\n channel = message.channel\n else:\n channel = Channel.objects.get(default=True)\n ConcurrencyLimiter.decr_message_count(channel, message.last_sent_time)\n",
"def fire_delivery_hook(outbound):\n outbound.refresh_from_db()\n # Only fire if the message has been delivered or we've reached max attempts\n if (\n not outbound.delivered\n and outbound.attempts < settings.MESSAGE_SENDER_MAX_RETRIES\n ):\n return\n\n payload = {\n \"outbound_id\": str(outbound.id),\n \"delivered\": outbound.delivered,\n \"to_addr\": outbound.to_addr,\n }\n if hasattr(outbound, \"to_identity\"):\n payload[\"identity\"] = outbound.to_identity\n\n if payload[\"to_addr\"] is None and payload.get(\"identity\", None) is None:\n raise InvalidMessage(outbound)\n\n # Becaues the Junebug event endpoint has no authentication, we get an\n # AnonymousUser object for the user. So we have to manually find all of the\n # hook events, ignoring the user, and deliver them.\n hooks = Hook.objects.filter(event=\"outbound.delivery_report\")\n for hook in hooks:\n hook.deliver_hook(None, payload_override={\"hook\": hook.dict(), \"data\": payload})\n"
] |
import base64
import hmac
from datetime import datetime, timedelta
from hashlib import sha256
from django import forms
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.shortcuts import get_object_or_404
from django_filters import rest_framework as filters
from prometheus_client import Counter
from rest_framework import mixins, status, viewsets
from rest_framework.authtoken.models import Token
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.filters import OrderingFilter
from rest_framework.pagination import CursorPagination
from rest_framework.permissions import AllowAny, IsAdminUser, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_hooks.models import Hook
from seed_message_sender.utils import (
create_identity,
get_available_metrics,
get_identity_by_address,
)
from .formatters import e_164
from .models import (
AggregateOutbounds,
ArchivedOutbounds,
Channel,
Inbound,
InvalidMessage,
Outbound,
OutboundSendFailure,
)
from .serializers import (
AggregateOutboundSerializer,
ArchivedOutboundSerializer,
CreateUserSerializer,
EventSerializer,
HookSerializer,
InboundSerializer,
JunebugEventSerializer,
JunebugInboundSerializer,
OutboundSendFailureSerializer,
OutboundSerializer,
WassupEventSerializer,
WassupInboundSerializer,
WhatsAppWebhookSerializer,
WhatsAppEventSerializer,
WhatsAppInboundSerializer,
)
from .tasks import (
ConcurrencyLimiter,
aggregate_outbounds,
archive_outbound,
requeue_failed_tasks,
send_message,
)
# Uncomment line below if scheduled metrics are added
# from .tasks import scheduled_metrics
class IdCursorPagination(CursorPagination):
    """Cursor pagination ordered by descending primary key."""

    ordering = "-id"
class UserView(APIView):
    """API endpoint that allows users creation and returns their token.
    Only admin users can do this to avoid permissions escalation.
    """

    permission_classes = (IsAdminUser,)

    def post(self, request):
        """Create a user and token, given an email. If user exists just
        provide the token."""
        serializer = CreateUserSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        email = serializer.validated_data.get("email")
        try:
            user = User.objects.get(username=email)
        except User.DoesNotExist:
            # The email doubles as the username for API-created accounts.
            user = User.objects.create_user(email, email=email)
        token, created = Token.objects.get_or_create(user=user)
        return Response(status=status.HTTP_201_CREATED, data={"token": token.key})
class HookViewSet(viewsets.ModelViewSet):
    """
    Retrieve, create, update or destroy webhooks.
    """

    permission_classes = (IsAuthenticated,)
    queryset = Hook.objects.all()
    serializer_class = HookSerializer

    def perform_create(self, serializer):
        # Hooks belong to the authenticated user who registered them.
        serializer.save(user=self.request.user)
class MultipleField(forms.Field):
    """Form field that cleans each element of a list, passing None through."""

    widget = forms.MultipleHiddenInput

    def clean(self, value):
        if value is None:
            return None
        # Bind the parent clean once; apply it element-wise.
        clean_one = super(MultipleField, self).clean
        return [clean_one(item) for item in value]
class MultipleFilter(filters.Filter):
    """Query filter accepting multiple values; defaults to an "in" lookup."""

    field_class = MultipleField

    def __init__(self, *args, **kwargs):
        kwargs.setdefault("lookup_expr", "in")
        super(MultipleFilter, self).__init__(*args, **kwargs)
class OutboundFilter(filters.FilterSet):
    """Filter set for Outbound messages: created_at range plus
    multi-value address/identity filters."""

    before = filters.IsoDateTimeFilter(field_name="created_at", lookup_expr="lte")
    after = filters.IsoDateTimeFilter(field_name="created_at", lookup_expr="gte")
    to_addr = MultipleFilter(field_name="to_addr")
    to_identity = MultipleFilter(field_name="to_identity")

    class Meta:
        model = Outbound
        fields = (
            "version",
            "vumi_message_id",
            "delivered",
            "attempts",
            "created_at",
            "updated_at",
            "before",
            "after",
        )
class OutboundViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Outbound models to be viewed or edited.
    """

    # NOTE: a previous `create` override delegated straight to super() with
    # no added behaviour; it has been removed as dead code — the inherited
    # ModelViewSet.create is used directly, so behaviour is unchanged.
    permission_classes = (IsAuthenticated,)
    queryset = Outbound.objects.all()
    serializer_class = OutboundSerializer
    filterset_class = OutboundFilter
    filter_backends = (filters.DjangoFilterBackend, OrderingFilter)
    ordering_fields = ("created_at",)
    ordering = ("-created_at",)
class InboundFilter(filters.FilterSet):
    """Filter set for Inbound messages with multi-value sender filters."""

    from_addr = MultipleFilter(field_name="from_addr")
    from_identity = MultipleFilter(field_name="from_identity")

    class Meta:
        model = Inbound
        fields = (
            "message_id",
            "in_reply_to",
            "to_addr",
            "content",
            "transport_name",
            "transport_type",
            "created_at",
            "updated_at",
        )
class InboundPreprocessor(object):
    """Normalises channel-specific inbound payloads before serialisation.

    Pops the channel-specific from-address field, resolves (or creates)
    the sender's identity, and releases a concurrency slot when an
    inbound closes a session on a rate-limited channel.
    """

    def pop_from_address(self, data, channel):
        # Each channel type names the sender field differently; the raw
        # address is removed so only from_identity is stored downstream.
        if channel.channel_type == Channel.VUMI_TYPE:
            return data.pop("from_addr", None)
        elif channel.channel_type == Channel.JUNEBUG_TYPE:
            return data.pop("from", None)
        elif channel.channel_type == Channel.WASSUP_API_TYPE:
            return data.get("data", {}).pop("from_addr", None)
        elif channel.channel_type == Channel.WHATSAPP_API_TYPE:
            return data.pop("from", None)

    def is_close_event(self, data, channel):
        # True when this inbound marks the end of a session.
        if channel.channel_type == Channel.VUMI_TYPE:
            return data.get("session_event") == "close"
        elif channel.channel_type == Channel.JUNEBUG_TYPE:
            return data.get("channel_data", {}).get("session_event") == "close"
        # Wassup/WhatsApp doesn't have sessions
        return False

    def get_related_outbound_id(self, data, channel):
        # Message ID of the outbound this inbound replies to, if any.
        if channel.channel_type == Channel.VUMI_TYPE:
            return data.get("in_reply_to")
        elif channel.channel_type == Channel.JUNEBUG_TYPE:
            return data.get("reply_to")
        return None

    def get_or_create_identity(self, msisdn):
        # Look up the identity for this address, creating one if missing.
        result = get_identity_by_address(msisdn)
        if result:
            return result[0]["id"]
        else:
            return create_identity(
                {
                    "details": {
                        "default_addr_type": "msisdn",
                        "addresses": {"msisdn": {msisdn: {"default": True}}},
                    }
                }
            )["id"]

    def preprocess_inbound(self, data, channel):
        """Mutate ``data`` in place: replace the from-address with
        ``from_identity`` and decrement the channel's in-flight count on
        session close."""
        msisdn = self.pop_from_address(data, channel)
        if msisdn is None:
            return
        msisdn = e_164(msisdn)
        data["from_identity"] = self.get_or_create_identity(msisdn)
        # Unlimited channels keep no concurrency counters.
        if channel.concurrency_limit == 0:
            return
        related_outbound = self.get_related_outbound_id(data, channel)
        if self.is_close_event(data, channel) and related_outbound:
            try:
                message = Outbound.objects.get(vumi_message_id=related_outbound)
            except (ObjectDoesNotExist, MultipleObjectsReturned):
                # NOTE(review): "-created_at" followed by .last() selects the
                # OLDEST outbound for this identity; .first() would select the
                # newest — confirm which was intended.
                message = (
                    Outbound.objects.filter(to_identity=data["from_identity"])
                    .order_by("-created_at")
                    .last()
                )
            if message:
                ConcurrencyLimiter.decr_message_count(channel, message.last_sent_time)
# Module-level shortcut bound to a shared preprocessor instance.
preprocess_inbound = InboundPreprocessor().preprocess_inbound
class InboundViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Inbound models to be viewed or edited.
    """

    permission_classes = (IsAuthenticated,)
    queryset = Inbound.objects.all()
    filterset_class = InboundFilter
    filter_backends = (filters.DjangoFilterBackend, OrderingFilter)
    ordering_fields = ("created_at",)
    ordering = ("-created_at",)

    def get_serializer_class(self):
        # Creation payloads differ per channel type; reads use the default.
        if self.action == "create":
            if self.channel.channel_type == Channel.VUMI_TYPE:
                return InboundSerializer
            elif self.channel.channel_type == Channel.JUNEBUG_TYPE:
                return JunebugInboundSerializer
            elif self.channel.channel_type == Channel.WASSUP_API_TYPE:
                return WassupInboundSerializer
            elif self.channel.channel_type == Channel.WHATSAPP_API_TYPE:
                return WhatsAppInboundSerializer
        return InboundSerializer

    def create(self, request, *args, **kwargs):
        # Resolve the channel (falling back to the default) and normalise
        # the payload before handing off to DRF's create flow.
        if not kwargs.get("channel_id"):
            self.channel = Channel.objects.get(default=True)
        else:
            self.channel = Channel.objects.get(channel_id=kwargs.get("channel_id"))
        preprocess_inbound(request.data, self.channel)
        return super(InboundViewSet, self).create(request, *args, **kwargs)
def fire_delivery_hook(outbound):
    """Deliver "outbound.delivery_report" webhooks for a finished message.

    Fires only once the message has been delivered or retries are
    exhausted; raises InvalidMessage when neither address nor identity
    is available in the payload.
    """
    outbound.refresh_from_db()
    # Only fire if the message has been delivered or we've reached max attempts
    if (
        not outbound.delivered
        and outbound.attempts < settings.MESSAGE_SENDER_MAX_RETRIES
    ):
        return

    payload = {
        "outbound_id": str(outbound.id),
        "delivered": outbound.delivered,
        "to_addr": outbound.to_addr,
    }
    if hasattr(outbound, "to_identity"):
        payload["identity"] = outbound.to_identity

    if payload["to_addr"] is None and payload.get("identity", None) is None:
        raise InvalidMessage(outbound)

    # Because the Junebug event endpoint has no authentication, we get an
    # AnonymousUser object for the user. So we have to manually find all of the
    # hook events, ignoring the user, and deliver them.
    hooks = Hook.objects.filter(event="outbound.delivery_report")
    for hook in hooks:
        hook.deliver_hook(None, payload_override={"hook": hook.dict(), "data": payload})
def decr_message_count(message):
    """Release a concurrency slot on the message's channel (or the default)."""
    channel = message.channel if message.channel else Channel.objects.get(default=True)
    ConcurrencyLimiter.decr_message_count(channel, message.last_sent_time)
# Prometheus counter: outbound delivery events, labelled by event type
# and channel.
outbound_event_total = Counter(
    "outbound_event_total", "Number of Outbound events", ["type", "channel"]
)
class EventListener(APIView):
    """
    Triggers updates to outbound messages based on event data from Vumi
    """

    permission_classes = (AllowAny,)

    def post(self, request, *args, **kwargs):
        """
        Checks for expect event types before continuing
        """
        serializer = EventSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(
                {"accepted": False, "reason": serializer.errors}, status=400
            )

        data = serializer.validated_data
        # Translate Vumi event names into the internal event vocabulary.
        event_type = {
            "ack": "ack",
            "nack": "nack",
            "delivery_report": "delivery_succeeded",
        }.get(data["event_type"])
        accepted, reason = process_event(
            data["user_message_id"], event_type, data["nack_reason"], data["timestamp"]
        )
        return Response(
            {"accepted": accepted, "reason": reason}, status=200 if accepted else 400
        )
# TODO make this work in test harness, works in production
# def perform_create(self, serializer):
# serializer.save(created_by=self.request.user,
# updated_by=self.request.user)
#
# def perform_update(self, serializer):
# serializer.save(updated_by=self.request.user)
class JunebugEventListener(APIView):
    """
    Triggers updates to outbound messages based on event data from Junebug
    """

    permission_classes = (AllowAny,)

    def post(self, request, *args, **kwargs):
        """
        Updates the message from the event data.
        """
        serializer = JunebugEventSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(
                {"accepted": False, "reason": serializer.errors}, status=400
            )

        data = serializer.validated_data
        # Translate Junebug event names into the internal event vocabulary.
        event_type = {
            "submitted": "ack",
            "rejected": "nack",
            "delivery_succeeded": "delivery_succeeded",
            "delivery_failed": "delivery_failed",
        }.get(data["event_type"])
        accepted, reason = process_event(
            data["message_id"], event_type, data["event_details"], data["timestamp"]
        )
        return Response(
            {"accepted": accepted, "reason": reason}, status=200 if accepted else 400
        )
class WassupEventListener(APIView):
    """
    Triggers updates to outbound messages based on event data from Wassup
    """

    permission_classes = (AllowAny,)

    def post(self, request, *args, **kwargs):
        """Dispatch a validated Wassup webhook to its event handler.

        Returns 400 for hook events this listener does not handle
        (previously `dispatcher.get` returned None and calling it raised
        TypeError, surfacing as a 500).
        """
        serializer = WassupEventSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(
                {"accepted": False, "reason": serializer.errors}, status=400
            )
        data = serializer.validated_data
        dispatcher = {"message.direct_outbound.status": self.handle_status}
        handler = dispatcher.get(data["hook"]["event"])
        if handler is None:
            return Response(
                {"accepted": False, "reason": "Unknown hook event"}, status=400
            )
        return handler(data["hook"], data["data"])

    def handle_status(self, hook, data):
        """Translate a Wassup delivery status into a process_event call."""
        event_type = {
            "sent": "ack",
            "unsent": "nack",
            "delivered": "delivery_succeeded",
            "failed": "delivery_failed",
        }.get(data["status"])
        accepted, reason = process_event(
            data["message_uuid"],
            event_type,
            data.get("description"),
            data.get("timestamp"),
        )
        return Response(
            {"accepted": accepted, "reason": reason}, status=200 if accepted else 400
        )
class WhatsAppEventListener(APIView):
    """Webhook receiver for WhatsApp delivery statuses and inbound messages."""

    permission_classes = (AllowAny,)

    def validate_signature(self, channel, request):
        """Verify the X-Engage-Hook-Signature HMAC; raise AuthenticationFailed
        when the secret, header, or digest is missing/invalid."""
        secret = channel.configuration.get("HMAC_SECRET")
        if not secret:
            raise AuthenticationFailed(
                "No HMAC_SECRET set on channel {}".format(channel.channel_id)
            )
        signature = request.META.get("HTTP_X_ENGAGE_HOOK_SIGNATURE")
        if not signature:
            raise AuthenticationFailed("X-Engage-Hook-Signature header required")
        h = hmac.new(secret.encode(), request.body, sha256)
        # compare_digest avoids leaking signature bytes via timing.
        if not hmac.compare_digest(base64.b64encode(h.digest()).decode(), signature):
            raise AuthenticationFailed("Invalid hook signature")

    def handle_event(self, serializer):
        """Apply one validated status update via process_event."""
        data = serializer.validated_data
        event_type = {
            "sent": "ack",
            "delivered": "delivery_succeeded",
            "failed": "delivery_failed",
            "read": "read",
        }.get(data["status"])
        accepted, reason = process_event(data["id"], event_type, "", data["timestamp"])
        return {"accepted": accepted, "reason": reason, "id": data["id"]}

    def handle_inbound(self, serializer):
        """Persist one validated inbound message."""
        serializer.save()
        return {"accepted": True, "id": serializer.data["id"]}

    def post(self, request, channel_id, *args, **kwargs):
        """Validate the webhook then process every status and inbound entry.

        Responds 200 only when all entries were accepted; otherwise 400
        with per-entry results.
        """
        channel = get_object_or_404(Channel, pk=channel_id)
        self.validate_signature(channel, request)

        serializer = WhatsAppWebhookSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(
                {"accepted": False, "reason": serializer.errors}, status=400
            )

        events = []
        for event in serializer.validated_data.get("statuses", []):
            event_serializer = WhatsAppEventSerializer(data=event)
            if not event_serializer.is_valid():
                # Record the per-entry failure but keep processing others.
                events.append(
                    {
                        "accepted": False,
                        "reason": event_serializer.errors,
                        "id": event.get("id"),
                    }
                )
                continue
            events.append(self.handle_event(event_serializer))

        inbounds = []
        for inbound in serializer.validated_data.get("messages", []):
            preprocess_inbound(inbound, channel)
            inbound_serializer = WhatsAppInboundSerializer(data=inbound)
            if not inbound_serializer.is_valid():
                inbounds.append(
                    {
                        "accepted": False,
                        "reason": inbound_serializer.errors,
                        "id": inbound.get("id"),
                    }
                )
                continue
            inbounds.append(self.handle_inbound(inbound_serializer))

        accepted = all(e["accepted"] for e in events) and all(
            i["accepted"] for i in inbounds
        )
        return Response(
            {"accepted": accepted, "messages": inbounds, "statuses": events},
            status=200 if accepted else 400,
        )
class MetricsView(APIView):
    """ Metrics Interaction
    GET - returns list of all available metrics on the service
    POST - starts up the task that fires all the scheduled metrics
    """

    permission_classes = (IsAuthenticated,)

    def get(self, request, *args, **kwargs):
        # List metric names only; values live in the metrics service.
        status = 200
        resp = {"metrics_available": get_available_metrics()}
        return Response(resp, status=status)

    def post(self, request, *args, **kwargs):
        # Currently a no-op acknowledgement; see the commented task call.
        status = 201
        # Uncomment line below if scheduled metrics are added
        # scheduled_metrics.apply_async()
        resp = {"scheduled_metrics_initiated": True}
        return Response(resp, status=status)
class HealthcheckView(APIView):
    """ Healthcheck Interaction
    GET - returns service up - getting auth'd requires DB
    """

    permission_classes = (IsAuthenticated,)

    def get(self, request, *args, **kwargs):
        # Versions are read lazily at request time.
        import seed_message_sender
        import django
        import rest_framework

        status = 200
        resp = {
            "up": True,
            "result": {
                # Authentication already touched the DB, so reaching this
                # point implies the database is accessible.
                "database": "Accessible",
                "version": seed_message_sender.__version__,
                "libraries": {
                    "django": django.__version__,
                    "djangorestframework": rest_framework.__version__,
                },
            },
        }
        return Response(resp, status=status)
class FailedTaskViewSet(
    mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet
):
    """List/retrieve failed outbound sends; POST requeues all of them."""

    permission_classes = (IsAuthenticated,)
    queryset = OutboundSendFailure.objects.all()
    serializer_class = OutboundSendFailureSerializer
    pagination_class = IdCursorPagination

    def create(self, request):
        # The requeue runs asynchronously; 201 only acknowledges the kick-off.
        status = 201
        resp = {"requeued_failed_tasks": True}
        requeue_failed_tasks.delay()
        return Response(resp, status=status)
class AggregateOutboundViewSet(viewsets.GenericViewSet):
    """POST triggers the asynchronous aggregate_outbounds task for a range."""

    permission_classes = (IsAuthenticated,)
    queryset = AggregateOutbounds.objects.all()
    serializer_class = AggregateOutboundSerializer

    def create(self, request):
        serializer = self.get_serializer_class()(data=request.data)
        serializer.is_valid(raise_exception=True)
        start = serializer.validated_data.get("start", None)
        end = serializer.validated_data.get("end", None)
        # Default window: the configured backtrack period ending today.
        if not end:
            end = datetime.now().date()
        if not start:
            diff = timedelta(days=settings.AGGREGATE_OUTBOUND_BACKTRACK)
            start = (datetime.now() - diff).date()
        aggregate_outbounds.delay(start.isoformat(), end.isoformat())
        # 202: work continues asynchronously.
        return Response({"aggregate_outbounds": True}, status=202)
class ArchivedOutboundViewSet(viewsets.GenericViewSet):
    """POST triggers the asynchronous archive_outbound task for a range.

    Unlike aggregation, both start and end are required here.
    """

    permission_classes = (IsAuthenticated,)
    queryset = ArchivedOutbounds.objects.all()
    serializer_class = ArchivedOutboundSerializer

    def create(self, request):
        serializer = self.get_serializer_class()(data=request.data)
        serializer.is_valid(raise_exception=True)
        start = serializer.validated_data["start"]
        end = serializer.validated_data["end"]
        archive_outbound.delay(start.isoformat(), end.isoformat())
        # 202: archiving continues asynchronously.
        return Response({"archived_outbounds": True}, status=202)
|
praekeltfoundation/seed-message-sender
|
message_sender/views.py
|
EventListener.post
|
python
|
def post(self, request, *args, **kwargs):
serializer = EventSerializer(data=request.data)
if not serializer.is_valid():
return Response(
{"accepted": False, "reason": serializer.errors}, status=400
)
data = serializer.validated_data
event_type = {
"ack": "ack",
"nack": "nack",
"delivery_report": "delivery_succeeded",
}.get(data["event_type"])
accepted, reason = process_event(
data["user_message_id"], event_type, data["nack_reason"], data["timestamp"]
)
return Response(
{"accepted": accepted, "reason": reason}, status=200 if accepted else 400
)
|
Checks for expect event types before continuing
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/views.py#L384-L409
| null |
class EventListener(APIView):
"""
Triggers updates to outbound messages based on event data from Vumi
"""
permission_classes = (AllowAny,)
|
praekeltfoundation/seed-message-sender
|
message_sender/signals.py
|
psh_fire_msg_action_if_new
|
python
|
def psh_fire_msg_action_if_new(sender, instance, created, **kwargs):
    """Post-save hook: queue the send task for newly created messages."""
    if created:
        # Deferred import avoids a circular dependency with tasks.py.
        from message_sender.tasks import send_message

        send_message.apply_async(kwargs={"message_id": str(instance.id)})
|
Post save hook to fire message send task
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/signals.py#L4-L10
| null |
from .models import Channel
def update_default_channels(sender, instance, created, **kwargs):
    """ Post save hook to ensure that there is only one default
    """
    if instance.default:
        # Demote every other channel currently flagged as default.
        Channel.objects.filter(default=True).exclude(
            channel_id=instance.channel_id
        ).update(default=False)
|
praekeltfoundation/seed-message-sender
|
message_sender/signals.py
|
update_default_channels
|
python
|
def update_default_channels(sender, instance, created, **kwargs):
    """Post-save hook ensuring at most one channel is flagged as default."""
    if instance.default:
        # Demote every other channel currently flagged as default.
        Channel.objects.filter(default=True).exclude(
            channel_id=instance.channel_id
        ).update(default=False)
|
Post save hook to ensure that there is only one default
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/signals.py#L13-L19
| null |
from .models import Channel
def psh_fire_msg_action_if_new(sender, instance, created, **kwargs):
    """ Post save hook to fire message send task
    """
    if created:
        # Deferred import avoids a circular dependency with tasks.py.
        from message_sender.tasks import send_message

        send_message.apply_async(kwargs={"message_id": str(instance.id)})
|
praekeltfoundation/seed-message-sender
|
message_sender/migrations/0020_outboundsendfailure_db_backed_fk_constraint.py
|
modify_fk_constraint
|
python
|
def modify_fk_constraint(apps, schema_editor):
    """Rebuild the OutboundSendFailure→Outbound FK with ON DELETE CASCADE.

    Drops the existing constraint (located via introspection, since its
    name is database-generated) and re-creates it with a CASCADE clause.
    """
    model = apps.get_model("message_sender", "OutboundSendFailure")
    table = model._meta.db_table
    with schema_editor.connection.cursor() as cursor:
        constraints = schema_editor.connection.introspection.get_constraints(
            cursor, table
        )
    # Exactly one FK is expected on this table; unpacking enforces that.
    [constraint] = filter(lambda c: c[1]["foreign_key"], constraints.items())
    [name, _] = constraint

    # SET CONSTRAINTS ... IMMEDIATE forces any deferred checks before drop.
    sql_delete_fk = (
        "SET CONSTRAINTS {name} IMMEDIATE; "
        "ALTER TABLE {table} DROP CONSTRAINT {name}"
    ).format(table=schema_editor.quote_name(table), name=schema_editor.quote_name(name))
    schema_editor.execute(sql_delete_fk)

    field = model.outbound.field
    to_table = field.remote_field.model._meta.db_table
    to_column = field.remote_field.model._meta.get_field(
        field.remote_field.field_name
    ).column

    sql_create_fk = (
        "ALTER TABLE {table} ADD CONSTRAINT {name} FOREIGN KEY "
        "({column}) REFERENCES {to_table} ({to_column}) "
        "ON DELETE CASCADE {deferrable};"
    ).format(
        table=schema_editor.quote_name(table),
        name=schema_editor.quote_name(name),
        column=schema_editor.quote_name(field.column),
        to_table=schema_editor.quote_name(to_table),
        to_column=schema_editor.quote_name(to_column),
        deferrable=schema_editor.connection.ops.deferrable_sql(),
    )
    schema_editor.execute(sql_create_fk)
|
Delete's the current foreign key contraint on the outbound field, and adds
it again, but this time with an ON DELETE clause
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/migrations/0020_outboundsendfailure_db_backed_fk_constraint.py#L10-L47
| null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-22 07:48
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
import django.db.models.deletion
class Migration(migrations.Migration):
    # Switches the outbound FK to DO_NOTHING at the ORM level, then runs
    # modify_fk_constraint to rebuild the DB constraint with CASCADE.

    dependencies = [
        ("message_sender", "0007_outboundsendfailure"),
        ("message_sender", "0019_auto_20180319_1446"),
    ]

    operations = [
        migrations.AlterField(
            model_name="outboundsendfailure",
            name="outbound",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.DO_NOTHING,
                to="message_sender.Outbound",
            ),
        ),
        migrations.RunPython(modify_fk_constraint),
    ]
|
praekeltfoundation/seed-message-sender
|
message_sender/serializers.py
|
InboundSerializer.to_internal_value
|
python
|
def to_internal_value(self, data):
    """Adds extra data to the helper_metadata field."""
    if "session_event" in data:
        # Preserve the transport session event alongside other helper metadata.
        data["helper_metadata"]["session_event"] = data["session_event"]

    return super(InboundSerializer, self).to_internal_value(data)
|
Adds extra data to the helper_metadata field.
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/serializers.py#L97-L104
| null |
class InboundSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Vumi-style inbound messages.

    At least one of from_addr / from_identity must be supplied.
    """

    class Meta:
        model = Inbound
        fields = (
            "url",
            "id",
            "message_id",
            "in_reply_to",
            "to_addr",
            "from_addr",
            "content",
            "transport_name",
            "transport_type",
            "helper_metadata",
            "created_at",
            "updated_at",
            "from_identity",
        )
        validators = [OneFieldRequiredValidator(["from_addr", "from_identity"])]
|
praekeltfoundation/seed-message-sender
|
message_sender/tasks.py
|
deliver_hook
|
python
|
def deliver_hook(target, payload, instance_id=None, hook_id=None, **kwargs):
    """POST a webhook payload to ``target`` with token authentication.

    target: the url to receive the payload.
    payload: a python primitive data structure
    instance_id: a possibly None "trigger" instance ID
    hook_id: the ID of defining Hook object

    Raises requests.HTTPError on a non-2xx response; returns the body text.
    """
    r = requests.post(
        url=target,
        data=json.dumps(payload),
        headers={
            "Content-Type": "application/json",
            "Authorization": "Token %s" % settings.HOOK_AUTH_TOKEN,
        },
    )
    r.raise_for_status()
    return r.text
|
target: the url to receive the payload.
payload: a python primitive data structure
instance_id: a possibly None "trigger" instance ID
hook_id: the ID of defining Hook object
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/tasks.py#L76-L92
| null |
import gzip
import json
import os
import pytz
import random
import requests
import time
from celery.exceptions import MaxRetriesExceededError, SoftTimeLimitExceeded
from celery.task import Task
from celery.utils.log import get_task_logger
from rest_hooks.models import Hook
from datetime import datetime, timedelta
from demands import HTTPServiceError
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.core.files import File
from django.db.models import Count, Sum
from django.db.models.signals import post_delete
from django.utils import timezone
from seed_services_client.metrics import MetricsApiClient
from requests import exceptions as requests_exceptions
from requests.exceptions import ConnectionError, HTTPError
from rest_framework.renderers import JSONRenderer
from rest_hooks.models import model_deleted
from seed_message_sender.celery import app
from .factory import MessageClientFactory
from .models import (
Outbound,
OutboundSendFailure,
Channel,
AggregateOutbounds,
ArchivedOutbounds,
)
from .serializers import OutboundArchiveSerializer
from seed_message_sender.utils import (
load_callable,
get_identity_address,
get_identity_by_address,
create_identity,
)
from message_sender.utils import daterange
# Module logger plus the configurable address formatters applied before
# handing numbers to the voice/text senders.
logger = get_task_logger(__name__)

voice_to_addr_formatter = load_callable(settings.VOICE_TO_ADDR_FORMATTER)
text_to_addr_formatter = load_callable(settings.TEXT_TO_ADDR_FORMATTER)
def calculate_retry_delay(attempt, max_delay=300):
    """Exponential backoff with jitter for retry scheduling.

    The delay is a random base in [2, 4) raised to ``attempt``; once it
    exceeds ``max_delay`` the growth stops and the delay hovers in a
    +/- 20 second band around the maximum.
    """
    delay = int(random.uniform(2, 4) ** attempt)
    if delay <= max_delay:
        return delay
    # Past the cap: stop the exponential growth, stay near the maximum.
    return int(random.uniform(max_delay - 20, max_delay + 20))
@app.task(
    autoretry_for=(HTTPError, ConnectionError, SoftTimeLimitExceeded),
    retry_backoff=True,
    retry_jitter=True,
    max_retries=15,
    acks_late=True,
    soft_time_limit=10,
    time_limit=15,
)
def deliver_hook_wrapper(target, payload, instance, hook):
    """Entry point that flattens ORM objects to IDs and queues the actual
    HTTP delivery as the ``deliver_hook`` task."""
    if instance is not None:
        instance_id = instance.id
    else:
        instance_id = None
    kwargs = dict(
        target=target, payload=payload, instance_id=instance_id, hook_id=hook.id
    )
    deliver_hook.apply_async(kwargs=kwargs)
# Shared client used by the fire_metric task below.
metric_client = MetricsApiClient(url=settings.METRICS_URL, auth=settings.METRICS_AUTH)
@app.task(
    autoretry_for=(HTTPError, ConnectionError, HTTPServiceError, SoftTimeLimitExceeded),
    retry_backoff=True,
    retry_jitter=True,
    max_retries=15,
    acks_late=True,
    soft_time_limit=10,
    time_limit=15,
)
def fire_metric(metric_name, metric_value):
    """ Fires a metric using the MetricsApiClient
    """
    # Values are coerced to float so the metrics API receives a number.
    metric_value = float(metric_value)
    metric = {metric_name: metric_value}
    metric_client.fire_metrics(**metric)
    return "Fired metric <{}> with value <{}>".format(metric_name, metric_value)
class ConcurrencyLimiter(object):
    """Tracks in-flight message counts per channel in time buckets.

    Counts live in the Django cache, keyed by channel and 60-second time
    bucket, so limits work across processes and stale entries expire on
    their own.
    """

    BUCKET_SIZE = 60  # seconds per counting bucket

    @classmethod
    def get_key(cls, channel_id, bucket):
        return "%s_messages_at_%s" % (channel_id, bucket)

    @classmethod
    def get_current_message_count(cls, channel):
        # Sum the values in all the buckets to get the total
        total = 0
        number_of_buckets = channel.message_timeout // cls.BUCKET_SIZE + 1
        bucket = int(time.time() // cls.BUCKET_SIZE)
        for i in range(bucket, bucket - number_of_buckets, -1):
            value = cache.get(cls.get_key(channel.channel_id, i))
            if value:
                total += int(value)
        return total

    @classmethod
    def incr_message_count(cls, channel_id, timeout):
        bucket = int(time.time() // cls.BUCKET_SIZE)
        key = cls.get_key(channel_id, bucket)
        # Add the bucket size to the expiry time so messages that start at
        # the end of the bucket still complete
        if not cache.add(key, 1, timeout + cls.BUCKET_SIZE):
            cache.incr(key)

    @classmethod
    def decr_message_count(cls, channel, msg_time):
        # No-op for unlimited channels or when the send time is unknown.
        if channel.concurrency_limit == 0:
            return
        timeout = channel.message_timeout
        if not msg_time:
            return
        # Convert from datetime to seconds since epoch
        msg_time = msg_time.replace(tzinfo=None) - msg_time.utcoffset()
        msg_time = (msg_time - datetime(1970, 1, 1)).total_seconds()
        time_since = time.time() - msg_time
        # The bucket has already expired; nothing left to decrement.
        if time_since > timeout:
            return
        bucket = int(msg_time // cls.BUCKET_SIZE)
        key = cls.get_key(channel.channel_id, bucket)
        # Set the expiry time to the timeout minus the time passed since
        # the message was sent.
        if int(cache.get_or_set(key, lambda: 0, timeout - time_since)) > 0:
            cache.decr(key)

    @classmethod
    def manage_limit(cls, task, channel):
        # At the cap the task is retried later (task.retry raises);
        # otherwise a slot is claimed for limited channels.
        limit = channel.concurrency_limit
        timeout = channel.message_timeout
        delay = channel.message_delay
        if limit > 0:
            if cls.get_current_message_count(channel) >= limit:
                task.retry(countdown=delay)
            cls.incr_message_count(channel.channel_id, timeout)
class SendMessage(Task):
    """
    Task to load and construct a message and send it off via the channel's
    message client, tracking attempts and firing metrics along the way.
    """

    name = "message_sender.tasks.send_message"
    default_retry_delay = 5
    max_retries = None
    # Separate, bounded counter for retries caused by gateway errors
    # (connection errors, timeouts, HTTP 4xx/5xx responses).
    max_error_retries = 5

    class FailedEventRequest(Exception):
        """
        The attempted task failed because of a non-200 HTTP return
        code.
        """

    def get_client(self, channel=None):
        """Return a message client suitable for *channel*."""
        return MessageClientFactory.create(channel)

    def fire_failed_msisdn_lookup(self, to_identity):
        """
        Fires a webhook in the event of a None to_addr.
        """
        payload = {"to_identity": to_identity}
        hooks = Hook.objects.filter(event="identity.no_address")
        for hook in hooks:
            hook.deliver_hook(
                None, payload_override={"hook": hook.dict(), "data": payload}
            )

    def run(self, message_id, **kwargs):
        """
        Load and construct message and send them off
        """
        log = self.get_logger(**kwargs)

        error_retry_count = kwargs.get("error_retry_count", 0)
        if error_retry_count >= self.max_error_retries:
            raise MaxRetriesExceededError(
                "Can't retry {0}[{1}] args:{2} kwargs:{3}".format(
                    self.name, self.request.id, self.request.args, kwargs
                )
            )

        log.info("Loading Outbound Message <%s>" % message_id)
        try:
            message = Outbound.objects.select_related("channel").get(id=message_id)
        except ObjectDoesNotExist:
            logger.error("Missing Outbound message", exc_info=True)
            return

        if message.attempts < settings.MESSAGE_SENDER_MAX_RETRIES:
            if error_retry_count > 0:
                retry_delay = calculate_retry_delay(error_retry_count)
            else:
                retry_delay = self.default_retry_delay
            log.info("Attempts: %s" % message.attempts)
            # send or resend
            try:
                if not message.channel:
                    channel = Channel.objects.get(default=True)
                else:
                    channel = message.channel

                sender = self.get_client(channel)
                ConcurrencyLimiter.manage_limit(self, channel)

                # Resolve a missing address from the identity store.
                if not message.to_addr and message.to_identity:
                    message.to_addr = get_identity_address(
                        message.to_identity, use_communicate_through=True
                    )
                if not message.to_addr:
                    # Still no address: notify via webhook and give up.
                    self.fire_failed_msisdn_lookup(message.to_identity)
                    return

                # Resolve (or create) an identity for a bare address.
                if message.to_addr and not message.to_identity:
                    result = get_identity_by_address(message.to_addr)
                    if result:
                        message.to_identity = result[0]["id"]
                    else:
                        identity = {
                            "details": {
                                "default_addr_type": "msisdn",
                                "addresses": {
                                    "msisdn": {message.to_addr: {"default": True}}
                                },
                            }
                        }
                        identity = create_identity(identity)
                        message.to_identity = identity["id"]

                if "voice_speech_url" in message.metadata:
                    # OBD number of tries metric
                    fire_metric.apply_async(
                        kwargs={
                            "metric_name": "vumimessage.obd.tries.sum",
                            "metric_value": 1.0,
                        }
                    )
                    # Voice message
                    speech_url = message.metadata["voice_speech_url"]
                    vumiresponse = sender.send_voice(
                        voice_to_addr_formatter(message.to_addr),
                        message.content,
                        speech_url=speech_url,
                        session_event="new",
                    )
                    log.info("Sent voice message to <%s>" % message.to_addr)
                elif "image_url" in message.metadata:
                    # Image message
                    image_url = message.metadata["image_url"]
                    vumiresponse = sender.send_image(
                        text_to_addr_formatter(message.to_addr),
                        message.content,
                        image_url=image_url,
                    )
                    log.info("Sent image message to <%s>" % (message.to_addr,))
                else:
                    # Plain content
                    vumiresponse = sender.send_text(
                        text_to_addr_formatter(message.to_addr),
                        message.content,
                        metadata=message.metadata,
                        session_event="new",
                    )
                    log.info("Sent text message to <%s>" % (message.to_addr,))

                message.last_sent_time = timezone.now()
                message.attempts += 1
                message.vumi_message_id = vumiresponse["message_id"]
                message.save()
                fire_metric.apply_async(
                    kwargs={"metric_name": "vumimessage.tries.sum", "metric_value": 1.0}
                )
            except requests_exceptions.ConnectionError as exc:
                log.info("Connection Error sending message")
                fire_metric.delay("sender.send_message.connection_error.sum", 1)
                kwargs["error_retry_count"] = error_retry_count + 1
                self.retry(
                    exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
                )
            except requests_exceptions.Timeout as exc:
                log.info("Sending message failed due to timeout")
                fire_metric.delay("sender.send_message.timeout.sum", 1)
                kwargs["error_retry_count"] = error_retry_count + 1
                self.retry(
                    exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
                )
            except (requests_exceptions.HTTPError, HTTPServiceError) as exc:
                # retry message sending if in 500 range (3 default
                # retries)
                log.info(
                    "Sending message failed due to status: %s"
                    % exc.response.status_code
                )
                metric_name = (
                    "sender.send_message.http_error.%s.sum" % exc.response.status_code
                )
                fire_metric.delay(metric_name, 1)
                kwargs["error_retry_count"] = error_retry_count + 1
                self.retry(
                    exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
                )
            # If we've gotten this far the message send was successful.
            fire_metric.apply_async(
                kwargs={"metric_name": "message.sent.sum", "metric_value": 1.0}
            )
            return vumiresponse
        else:
            # This is for retries based on async nacks from the transport.
            log.info("Message <%s> at max retries." % str(message_id))
            message.to_addr = ""
            message.save(update_fields=["to_addr"])
            fire_metric.apply_async(
                kwargs={
                    "metric_name": "vumimessage.maxretries.sum",
                    "metric_value": 1.0,
                }
            )
            # Count failures on exhausted tries.
            fire_metric.apply_async(
                kwargs={"metric_name": "message.failures.sum", "metric_value": 1.0}
            )

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        """Record a permanent send failure once error retries are exhausted."""
        error_retry_count = kwargs.get("error_retry_count", 0)
        if error_retry_count == self.max_error_retries:
            if "message_id" in kwargs:
                message_id = kwargs["message_id"]
            else:
                message_id = args[0]
            OutboundSendFailure.objects.create(
                outbound_id=message_id,
                initiated_at=self.request.eta,
                # FIX: exceptions have no ``.message`` attribute on Python 3
                # (``einfo.exception.message`` raised AttributeError here);
                # capture the failure reason with str() instead.
                reason=str(einfo.exception),
                task_id=task_id,
            )
            # Count permanent failures.
            fire_metric.apply_async(
                kwargs={"metric_name": "message.failures.sum", "metric_value": 1.0}
            )
        super(SendMessage, self).on_failure(exc, task_id, args, kwargs, einfo)
# Module-level task instance; used directly (e.g. ``send_message.delay``)
# and registered with celery under ``SendMessage.name``.
send_message = SendMessage()
class RequeueFailedTasks(Task):
    """
    Task to requeue failed Outbounds.
    """

    name = "message_sender.tasks.requeue_failed_tasks"

    def run(self, **kwargs):
        """Re-enqueue every recorded outbound send failure."""
        log = self.get_logger(**kwargs)
        failure_qs = OutboundSendFailure.objects
        count = failure_qs.all().count()
        log.info("Attempting to requeue <%s> failed Outbound sends" % count)
        for record in failure_qs.iterator():
            outbound_pk = str(record.outbound_id)
            # Drop the failure row before resending so a successful retry
            # does not leave a stale failure record behind.
            record.delete()
            send_message.delay(outbound_pk)
# Module-level task instance for celery registration and direct invocation.
requeue_failed_tasks = RequeueFailedTasks()
class AggregateOutboundMessages(Task):
    """
    Task to aggregate the outbound messages and store the results in the
    aggregate table
    """

    name = "message_sender.tasks.aggregate_outbounds"

    def run(self, start_date, end_date):
        """Recompute per-day outbound aggregates for the inclusive range.

        ``start_date``/``end_date`` are ``YYYY-MM-DD`` strings, treated
        as UTC dates.
        """
        start = datetime.strptime(start_date, "%Y-%m-%d").replace(tzinfo=pytz.UTC)
        end = datetime.strptime(end_date, "%Y-%m-%d").replace(tzinfo=pytz.UTC)
        # Wipe existing aggregates for the range first: an in-place update
        # could leave stale rows behind (e.g. an "undelivered" bucket that
        # should now be absent entirely, not merely updated).
        AggregateOutbounds.objects.filter(
            date__gte=start.date(), date__lte=end.date()
        ).delete()
        for day in daterange(start, end):
            next_day = day + timedelta(1)
            rows = (
                Outbound.objects.filter(created_at__gte=day, created_at__lt=next_day)
                .values("delivered", "channel")
                .annotate(attempts=Sum("attempts"), total=Count("*"))
            )
            for row in rows.iterator():
                AggregateOutbounds.objects.create(
                    date=day,
                    delivered=row["delivered"],
                    channel_id=row["channel"],
                    attempts=row["attempts"],
                    total=row["total"],
                )
# Module-level task instance for celery registration and direct invocation.
aggregate_outbounds = AggregateOutboundMessages()
class ArchiveOutboundMessages(Task):
    """
    Task to archive the outbound messages and store the messages in the
    storage backend
    """

    name = "message_sender.tasks.archive_outbounds"

    def filename(self, date):
        """
        Returns the filename for the provided date
        """
        return "outbounds-{}.gz".format(date.strftime("%Y-%m-%d"))

    def dump_data(self, filename, queryset):
        """
        Serializes the queryset into a newline separated JSON format, and
        places it into a gzipped file
        """
        with gzip.open(filename, "wb") as f:
            for outbound in queryset.iterator():
                data = OutboundArchiveSerializer(outbound).data
                data = JSONRenderer().render(data)
                f.write(data)
                f.write("\n".encode("utf-8"))

    def create_archived_outbound(self, date, filename):
        """
        Creates the required ArchivedOutbound entry with the file specified
        at `filename`
        """
        with open(filename, "rb") as f:
            f = File(f)
            ArchivedOutbounds.objects.create(date=date, archive=f)

    def run(self, start_date, end_date):
        """Archive and delete outbounds for each day in the inclusive range.

        Days that already have an archive, or that have no outbounds, are
        skipped. ``start_date``/``end_date`` are ``YYYY-MM-DD`` strings,
        treated as UTC dates.
        """
        start_date = datetime.strptime(start_date, "%Y-%m-%d").replace(tzinfo=pytz.UTC)
        end_date = datetime.strptime(end_date, "%Y-%m-%d").replace(tzinfo=pytz.UTC)
        for d in daterange(start_date, end_date):
            if ArchivedOutbounds.objects.filter(date=d.date()).exists():
                continue
            query = Outbound.objects.filter(
                created_at__gte=d, created_at__lt=(d + timedelta(1))
            )
            if not query.exists():
                continue
            filename = self.filename(d)
            try:
                self.dump_data(filename, query)
                self.create_archived_outbound(d, filename)
            finally:
                # FIX: always remove the temporary archive file, even when
                # serialization or the upload fails part-way.
                if os.path.exists(filename):
                    os.remove(filename)
            # Remove the post_delete hook from rest_hooks, otherwise we'll have
            # to load all of the outbounds into memory
            post_delete.disconnect(
                receiver=model_deleted, dispatch_uid="instance-deleted-hook"
            )
            try:
                query.delete()
            finally:
                # FIX: reconnect unconditionally so a failed delete does not
                # leave the deletion webhook disabled for the whole process.
                post_delete.connect(
                    receiver=model_deleted, dispatch_uid="instance-deleted-hook"
                )
# Module-level task instance for celery registration and direct invocation.
archive_outbound = ArchiveOutboundMessages()
|
praekeltfoundation/seed-message-sender
|
message_sender/tasks.py
|
fire_metric
|
python
|
def fire_metric(metric_name, metric_value):
    """Fire a single named metric via the shared MetricsApiClient.

    Returns a human-readable confirmation string for task-result logging.
    """
    value = float(metric_value)
    metric_client.fire_metrics(**{metric_name: value})
    return "Fired metric <{}> with value <{}>".format(metric_name, value)
|
Fires a metric using the MetricsApiClient
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/tasks.py#L118-L124
| null |
import gzip
import json
import os
import pytz
import random
import requests
import time
from celery.exceptions import MaxRetriesExceededError, SoftTimeLimitExceeded
from celery.task import Task
from celery.utils.log import get_task_logger
from rest_hooks.models import Hook
from datetime import datetime, timedelta
from demands import HTTPServiceError
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.core.files import File
from django.db.models import Count, Sum
from django.db.models.signals import post_delete
from django.utils import timezone
from seed_services_client.metrics import MetricsApiClient
from requests import exceptions as requests_exceptions
from requests.exceptions import ConnectionError, HTTPError
from rest_framework.renderers import JSONRenderer
from rest_hooks.models import model_deleted
from seed_message_sender.celery import app
from .factory import MessageClientFactory
from .models import (
Outbound,
OutboundSendFailure,
Channel,
AggregateOutbounds,
ArchivedOutbounds,
)
from .serializers import OutboundArchiveSerializer
from seed_message_sender.utils import (
load_callable,
get_identity_address,
get_identity_by_address,
create_identity,
)
from message_sender.utils import daterange
# Module-level task logger (used when no per-task logger is available).
logger = get_task_logger(__name__)
# Address formatters are resolved from dotted paths in settings so
# deployments can customise voice/text address normalisation.
voice_to_addr_formatter = load_callable(settings.VOICE_TO_ADDR_FORMATTER)
text_to_addr_formatter = load_callable(settings.TEXT_TO_ADDR_FORMATTER)
def calculate_retry_delay(attempt, max_delay=300):
    """Return an exponentially growing retry delay (seconds) with jitter.

    The exponent base is drawn from [2, 4), so successive attempts back
    off at a randomised exponential rate. Once the computed delay exceeds
    *max_delay*, growth stops and the delay is jittered around *max_delay*.
    """
    backoff = int(random.uniform(2, 4) ** attempt)
    if backoff <= max_delay:
        return backoff
    # Past the cap: keep retrying at roughly max_delay +/- 20 seconds.
    return int(random.uniform(max_delay - 20, max_delay + 20))
@app.task(
    autoretry_for=(HTTPError, ConnectionError, SoftTimeLimitExceeded),
    retry_backoff=True,
    retry_jitter=True,
    max_retries=15,
    acks_late=True,
    soft_time_limit=10,
    time_limit=15,
)
def deliver_hook(target, payload, instance_id=None, hook_id=None, **kwargs):
    """
    target: the url to receive the payload.
    payload: a python primitive data structure
    instance_id: a possibly None "trigger" instance ID
    hook_id: the ID of defining Hook object
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": "Token %s" % settings.HOOK_AUTH_TOKEN,
    }
    response = requests.post(url=target, data=json.dumps(payload), headers=headers)
    # Surface HTTP errors so celery's autoretry_for handles them.
    response.raise_for_status()
    return response.text
def deliver_hook_wrapper(target, payload, instance, hook):
    """Queue an asynchronous hook delivery, tolerating a missing instance."""
    instance_id = instance.id if instance is not None else None
    deliver_hook.apply_async(
        kwargs={
            "target": target,
            "payload": payload,
            "instance_id": instance_id,
            "hook_id": hook.id,
        }
    )
# Shared client for pushing metrics to the configured metrics service.
metric_client = MetricsApiClient(url=settings.METRICS_URL, auth=settings.METRICS_AUTH)
@app.task(
autoretry_for=(HTTPError, ConnectionError, HTTPServiceError, SoftTimeLimitExceeded),
retry_backoff=True,
retry_jitter=True,
max_retries=15,
acks_late=True,
soft_time_limit=10,
time_limit=15,
)
class ConcurrencyLimiter(object):
BUCKET_SIZE = 60
@classmethod
def get_key(cls, channel_id, bucket):
return "%s_messages_at_%s" % (channel_id, bucket)
@classmethod
def get_current_message_count(cls, channel):
# Sum the values in all the buckets to get the total
total = 0
number_of_buckets = channel.message_timeout // cls.BUCKET_SIZE + 1
bucket = int(time.time() // cls.BUCKET_SIZE)
for i in range(bucket, bucket - number_of_buckets, -1):
value = cache.get(cls.get_key(channel.channel_id, i))
if value:
total += int(value)
return total
@classmethod
def incr_message_count(cls, channel_id, timeout):
bucket = int(time.time() // cls.BUCKET_SIZE)
key = cls.get_key(channel_id, bucket)
# Add the bucket size to the expiry time so messages that start at
# the end of the bucket still complete
if not cache.add(key, 1, timeout + cls.BUCKET_SIZE):
cache.incr(key)
@classmethod
def decr_message_count(cls, channel, msg_time):
if channel.concurrency_limit == 0:
return
timeout = channel.message_timeout
if not msg_time:
return
# Convert from datetime to seconds since epoch
msg_time = msg_time.replace(tzinfo=None) - msg_time.utcoffset()
msg_time = (msg_time - datetime(1970, 1, 1)).total_seconds()
time_since = time.time() - msg_time
if time_since > timeout:
return
bucket = int(msg_time // cls.BUCKET_SIZE)
key = cls.get_key(channel.channel_id, bucket)
# Set the expiry time to the timeout minus the time passed since
# the message was sent.
if int(cache.get_or_set(key, lambda: 0, timeout - time_since)) > 0:
cache.decr(key)
@classmethod
def manage_limit(cls, task, channel):
limit = channel.concurrency_limit
timeout = channel.message_timeout
delay = channel.message_delay
if limit > 0:
if cls.get_current_message_count(channel) >= limit:
task.retry(countdown=delay)
cls.incr_message_count(channel.channel_id, timeout)
class SendMessage(Task):
"""
Task to load and contruct message and send them off
"""
name = "message_sender.tasks.send_message"
default_retry_delay = 5
max_retries = None
max_error_retries = 5
class FailedEventRequest(Exception):
"""
The attempted task failed because of a non-200 HTTP return
code.
"""
def get_client(self, channel=None):
return MessageClientFactory.create(channel)
def fire_failed_msisdn_lookup(self, to_identity):
"""
Fires a webhook in the event of a None to_addr.
"""
payload = {"to_identity": to_identity}
hooks = Hook.objects.filter(event="identity.no_address")
for hook in hooks:
hook.deliver_hook(
None, payload_override={"hook": hook.dict(), "data": payload}
)
def run(self, message_id, **kwargs):
"""
Load and contruct message and send them off
"""
log = self.get_logger(**kwargs)
error_retry_count = kwargs.get("error_retry_count", 0)
if error_retry_count >= self.max_error_retries:
raise MaxRetriesExceededError(
"Can't retry {0}[{1}] args:{2} kwargs:{3}".format(
self.name, self.request.id, self.request.args, kwargs
)
)
log.info("Loading Outbound Message <%s>" % message_id)
try:
message = Outbound.objects.select_related("channel").get(id=message_id)
except ObjectDoesNotExist:
logger.error("Missing Outbound message", exc_info=True)
return
if message.attempts < settings.MESSAGE_SENDER_MAX_RETRIES:
if error_retry_count > 0:
retry_delay = calculate_retry_delay(error_retry_count)
else:
retry_delay = self.default_retry_delay
log.info("Attempts: %s" % message.attempts)
# send or resend
try:
if not message.channel:
channel = Channel.objects.get(default=True)
else:
channel = message.channel
sender = self.get_client(channel)
ConcurrencyLimiter.manage_limit(self, channel)
if not message.to_addr and message.to_identity:
message.to_addr = get_identity_address(
message.to_identity, use_communicate_through=True
)
if not message.to_addr:
self.fire_failed_msisdn_lookup(message.to_identity)
return
if message.to_addr and not message.to_identity:
result = get_identity_by_address(message.to_addr)
if result:
message.to_identity = result[0]["id"]
else:
identity = {
"details": {
"default_addr_type": "msisdn",
"addresses": {
"msisdn": {message.to_addr: {"default": True}}
},
}
}
identity = create_identity(identity)
message.to_identity = identity["id"]
if "voice_speech_url" in message.metadata:
# OBD number of tries metric
fire_metric.apply_async(
kwargs={
"metric_name": "vumimessage.obd.tries.sum",
"metric_value": 1.0,
}
)
# Voice message
speech_url = message.metadata["voice_speech_url"]
vumiresponse = sender.send_voice(
voice_to_addr_formatter(message.to_addr),
message.content,
speech_url=speech_url,
session_event="new",
)
log.info("Sent voice message to <%s>" % message.to_addr)
elif "image_url" in message.metadata:
# Image message
image_url = message.metadata["image_url"]
vumiresponse = sender.send_image(
text_to_addr_formatter(message.to_addr),
message.content,
image_url=image_url,
)
log.info("Sent image message to <%s>" % (message.to_addr,))
else:
# Plain content
vumiresponse = sender.send_text(
text_to_addr_formatter(message.to_addr),
message.content,
metadata=message.metadata,
session_event="new",
)
log.info("Sent text message to <%s>" % (message.to_addr,))
message.last_sent_time = timezone.now()
message.attempts += 1
message.vumi_message_id = vumiresponse["message_id"]
message.save()
fire_metric.apply_async(
kwargs={"metric_name": "vumimessage.tries.sum", "metric_value": 1.0}
)
except requests_exceptions.ConnectionError as exc:
log.info("Connection Error sending message")
fire_metric.delay("sender.send_message.connection_error.sum", 1)
kwargs["error_retry_count"] = error_retry_count + 1
self.retry(
exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
)
except requests_exceptions.Timeout as exc:
log.info("Sending message failed due to timeout")
fire_metric.delay("sender.send_message.timeout.sum", 1)
kwargs["error_retry_count"] = error_retry_count + 1
self.retry(
exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
)
except (requests_exceptions.HTTPError, HTTPServiceError) as exc:
# retry message sending if in 500 range (3 default
# retries)
log.info(
"Sending message failed due to status: %s"
% exc.response.status_code
)
metric_name = (
"sender.send_message.http_error.%s.sum" % exc.response.status_code
)
fire_metric.delay(metric_name, 1)
kwargs["error_retry_count"] = error_retry_count + 1
self.retry(
exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
)
# If we've gotten this far the message send was successful.
fire_metric.apply_async(
kwargs={"metric_name": "message.sent.sum", "metric_value": 1.0}
)
return vumiresponse
else:
# This is for retries based on async nacks from the transport.
log.info("Message <%s> at max retries." % str(message_id))
message.to_addr = ""
message.save(update_fields=["to_addr"])
fire_metric.apply_async(
kwargs={
"metric_name": "vumimessage.maxretries.sum",
"metric_value": 1.0,
}
)
# Count failures on exhausted tries.
fire_metric.apply_async(
kwargs={"metric_name": "message.failures.sum", "metric_value": 1.0}
)
def on_failure(self, exc, task_id, args, kwargs, einfo):
error_retry_count = kwargs.get("error_retry_count", 0)
if error_retry_count == self.max_error_retries:
if "message_id" in kwargs:
message_id = kwargs["message_id"]
else:
message_id = args[0]
OutboundSendFailure.objects.create(
outbound_id=message_id,
initiated_at=self.request.eta,
reason=einfo.exception.message,
task_id=task_id,
)
# Count permanent failures.
fire_metric.apply_async(
kwargs={"metric_name": "message.failures.sum", "metric_value": 1.0}
)
super(SendMessage, self).on_failure(exc, task_id, args, kwargs, einfo)
send_message = SendMessage()
class RequeueFailedTasks(Task):
"""
Task to requeue failed Outbounds.
"""
name = "message_sender.tasks.requeue_failed_tasks"
def run(self, **kwargs):
log = self.get_logger(**kwargs)
failures = OutboundSendFailure.objects
log.info(
"Attempting to requeue <%s> failed Outbound sends" % failures.all().count()
)
for failure in failures.iterator():
outbound_id = str(failure.outbound_id)
# Cleanup the failure before requeueing it.
failure.delete()
send_message.delay(outbound_id)
requeue_failed_tasks = RequeueFailedTasks()
class AggregateOutboundMessages(Task):
"""
Task to aggregate the outbound messages and store the results in the
aggregate table
"""
name = "message_sender.tasks.aggregate_outbounds"
def run(self, start_date, end_date):
start_date = datetime.strptime(start_date, "%Y-%m-%d").replace(tzinfo=pytz.UTC)
end_date = datetime.strptime(end_date, "%Y-%m-%d").replace(tzinfo=pytz.UTC)
# Delete any existing aggregates for these dates. This is necessary
# to avoid having leftovers from changed objects. eg. There were
# undelivered messages, but now they're all delivered, so we don't want
# the undelivered aggregate to still be there, but an update won't set
# the undelivered aggregate to 0.
AggregateOutbounds.objects.filter(
date__gte=start_date.date(), date__lte=end_date.date()
).delete()
for d in daterange(start_date, end_date):
query = Outbound.objects.filter(
created_at__gte=d, created_at__lt=(d + timedelta(1))
)
query = query.values("delivered", "channel")
query = query.annotate(attempts=Sum("attempts"), total=Count("*"))
for aggregate in query.iterator():
AggregateOutbounds.objects.create(
date=d,
delivered=aggregate["delivered"],
channel_id=aggregate["channel"],
attempts=aggregate["attempts"],
total=aggregate["total"],
)
aggregate_outbounds = AggregateOutboundMessages()
class ArchiveOutboundMessages(Task):
"""
Task to archive the outbound messages and store the messages in the
storage backend
"""
name = "message_sender.tasks.archive_outbounds"
def filename(self, date):
"""
Returns the filename for the provided date
"""
return "outbounds-{}.gz".format(date.strftime("%Y-%m-%d"))
def dump_data(self, filename, queryset):
"""
Serializes the queryset into a newline separated JSON format, and
places it into a gzipped file
"""
with gzip.open(filename, "wb") as f:
for outbound in queryset.iterator():
data = OutboundArchiveSerializer(outbound).data
data = JSONRenderer().render(data)
f.write(data)
f.write("\n".encode("utf-8"))
def create_archived_outbound(self, date, filename):
"""
Creates the required ArchivedOutbound entry with the file specified
at `filename`
"""
with open(filename, "rb") as f:
f = File(f)
ArchivedOutbounds.objects.create(date=date, archive=f)
def run(self, start_date, end_date):
start_date = datetime.strptime(start_date, "%Y-%m-%d").replace(tzinfo=pytz.UTC)
end_date = datetime.strptime(end_date, "%Y-%m-%d").replace(tzinfo=pytz.UTC)
for d in daterange(start_date, end_date):
if ArchivedOutbounds.objects.filter(date=d.date()).exists():
continue
query = Outbound.objects.filter(
created_at__gte=d, created_at__lt=(d + timedelta(1))
)
if not query.exists():
continue
filename = self.filename(d)
self.dump_data(filename, query)
self.create_archived_outbound(d, filename)
os.remove(filename)
# Remove the post_delete hook from rest_hooks, otherwise we'll have
# to load all of the outbounds into memory
post_delete.disconnect(
receiver=model_deleted, dispatch_uid="instance-deleted-hook"
)
query.delete()
post_delete.connect(
receiver=model_deleted, dispatch_uid="instance-deleted-hook"
)
archive_outbound = ArchiveOutboundMessages()
|
praekeltfoundation/seed-message-sender
|
message_sender/tasks.py
|
SendMessage.fire_failed_msisdn_lookup
|
python
|
def fire_failed_msisdn_lookup(self, to_identity):
payload = {"to_identity": to_identity}
hooks = Hook.objects.filter(event="identity.no_address")
for hook in hooks:
hook.deliver_hook(
None, payload_override={"hook": hook.dict(), "data": payload}
)
|
Fires a webhook in the event of a None to_addr.
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/tasks.py#L214-L223
| null |
class SendMessage(Task):
"""
Task to load and contruct message and send them off
"""
name = "message_sender.tasks.send_message"
default_retry_delay = 5
max_retries = None
max_error_retries = 5
class FailedEventRequest(Exception):
"""
The attempted task failed because of a non-200 HTTP return
code.
"""
def get_client(self, channel=None):
return MessageClientFactory.create(channel)
def run(self, message_id, **kwargs):
"""
Load and contruct message and send them off
"""
log = self.get_logger(**kwargs)
error_retry_count = kwargs.get("error_retry_count", 0)
if error_retry_count >= self.max_error_retries:
raise MaxRetriesExceededError(
"Can't retry {0}[{1}] args:{2} kwargs:{3}".format(
self.name, self.request.id, self.request.args, kwargs
)
)
log.info("Loading Outbound Message <%s>" % message_id)
try:
message = Outbound.objects.select_related("channel").get(id=message_id)
except ObjectDoesNotExist:
logger.error("Missing Outbound message", exc_info=True)
return
if message.attempts < settings.MESSAGE_SENDER_MAX_RETRIES:
if error_retry_count > 0:
retry_delay = calculate_retry_delay(error_retry_count)
else:
retry_delay = self.default_retry_delay
log.info("Attempts: %s" % message.attempts)
# send or resend
try:
if not message.channel:
channel = Channel.objects.get(default=True)
else:
channel = message.channel
sender = self.get_client(channel)
ConcurrencyLimiter.manage_limit(self, channel)
if not message.to_addr and message.to_identity:
message.to_addr = get_identity_address(
message.to_identity, use_communicate_through=True
)
if not message.to_addr:
self.fire_failed_msisdn_lookup(message.to_identity)
return
if message.to_addr and not message.to_identity:
result = get_identity_by_address(message.to_addr)
if result:
message.to_identity = result[0]["id"]
else:
identity = {
"details": {
"default_addr_type": "msisdn",
"addresses": {
"msisdn": {message.to_addr: {"default": True}}
},
}
}
identity = create_identity(identity)
message.to_identity = identity["id"]
if "voice_speech_url" in message.metadata:
# OBD number of tries metric
fire_metric.apply_async(
kwargs={
"metric_name": "vumimessage.obd.tries.sum",
"metric_value": 1.0,
}
)
# Voice message
speech_url = message.metadata["voice_speech_url"]
vumiresponse = sender.send_voice(
voice_to_addr_formatter(message.to_addr),
message.content,
speech_url=speech_url,
session_event="new",
)
log.info("Sent voice message to <%s>" % message.to_addr)
elif "image_url" in message.metadata:
# Image message
image_url = message.metadata["image_url"]
vumiresponse = sender.send_image(
text_to_addr_formatter(message.to_addr),
message.content,
image_url=image_url,
)
log.info("Sent image message to <%s>" % (message.to_addr,))
else:
# Plain content
vumiresponse = sender.send_text(
text_to_addr_formatter(message.to_addr),
message.content,
metadata=message.metadata,
session_event="new",
)
log.info("Sent text message to <%s>" % (message.to_addr,))
message.last_sent_time = timezone.now()
message.attempts += 1
message.vumi_message_id = vumiresponse["message_id"]
message.save()
fire_metric.apply_async(
kwargs={"metric_name": "vumimessage.tries.sum", "metric_value": 1.0}
)
except requests_exceptions.ConnectionError as exc:
log.info("Connection Error sending message")
fire_metric.delay("sender.send_message.connection_error.sum", 1)
kwargs["error_retry_count"] = error_retry_count + 1
self.retry(
exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
)
except requests_exceptions.Timeout as exc:
log.info("Sending message failed due to timeout")
fire_metric.delay("sender.send_message.timeout.sum", 1)
kwargs["error_retry_count"] = error_retry_count + 1
self.retry(
exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
)
except (requests_exceptions.HTTPError, HTTPServiceError) as exc:
# retry message sending if in 500 range (3 default
# retries)
log.info(
"Sending message failed due to status: %s"
% exc.response.status_code
)
metric_name = (
"sender.send_message.http_error.%s.sum" % exc.response.status_code
)
fire_metric.delay(metric_name, 1)
kwargs["error_retry_count"] = error_retry_count + 1
self.retry(
exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
)
# If we've gotten this far the message send was successful.
fire_metric.apply_async(
kwargs={"metric_name": "message.sent.sum", "metric_value": 1.0}
)
return vumiresponse
else:
# This is for retries based on async nacks from the transport.
log.info("Message <%s> at max retries." % str(message_id))
message.to_addr = ""
message.save(update_fields=["to_addr"])
fire_metric.apply_async(
kwargs={
"metric_name": "vumimessage.maxretries.sum",
"metric_value": 1.0,
}
)
# Count failures on exhausted tries.
fire_metric.apply_async(
kwargs={"metric_name": "message.failures.sum", "metric_value": 1.0}
)
def on_failure(self, exc, task_id, args, kwargs, einfo):
error_retry_count = kwargs.get("error_retry_count", 0)
if error_retry_count == self.max_error_retries:
if "message_id" in kwargs:
message_id = kwargs["message_id"]
else:
message_id = args[0]
OutboundSendFailure.objects.create(
outbound_id=message_id,
initiated_at=self.request.eta,
reason=einfo.exception.message,
task_id=task_id,
)
# Count permanent failures.
fire_metric.apply_async(
kwargs={"metric_name": "message.failures.sum", "metric_value": 1.0}
)
super(SendMessage, self).on_failure(exc, task_id, args, kwargs, einfo)
|
praekeltfoundation/seed-message-sender
|
message_sender/tasks.py
|
SendMessage.run
|
python
|
def run(self, message_id, **kwargs):
log = self.get_logger(**kwargs)
error_retry_count = kwargs.get("error_retry_count", 0)
if error_retry_count >= self.max_error_retries:
raise MaxRetriesExceededError(
"Can't retry {0}[{1}] args:{2} kwargs:{3}".format(
self.name, self.request.id, self.request.args, kwargs
)
)
log.info("Loading Outbound Message <%s>" % message_id)
try:
message = Outbound.objects.select_related("channel").get(id=message_id)
except ObjectDoesNotExist:
logger.error("Missing Outbound message", exc_info=True)
return
if message.attempts < settings.MESSAGE_SENDER_MAX_RETRIES:
if error_retry_count > 0:
retry_delay = calculate_retry_delay(error_retry_count)
else:
retry_delay = self.default_retry_delay
log.info("Attempts: %s" % message.attempts)
# send or resend
try:
if not message.channel:
channel = Channel.objects.get(default=True)
else:
channel = message.channel
sender = self.get_client(channel)
ConcurrencyLimiter.manage_limit(self, channel)
if not message.to_addr and message.to_identity:
message.to_addr = get_identity_address(
message.to_identity, use_communicate_through=True
)
if not message.to_addr:
self.fire_failed_msisdn_lookup(message.to_identity)
return
if message.to_addr and not message.to_identity:
result = get_identity_by_address(message.to_addr)
if result:
message.to_identity = result[0]["id"]
else:
identity = {
"details": {
"default_addr_type": "msisdn",
"addresses": {
"msisdn": {message.to_addr: {"default": True}}
},
}
}
identity = create_identity(identity)
message.to_identity = identity["id"]
if "voice_speech_url" in message.metadata:
# OBD number of tries metric
fire_metric.apply_async(
kwargs={
"metric_name": "vumimessage.obd.tries.sum",
"metric_value": 1.0,
}
)
# Voice message
speech_url = message.metadata["voice_speech_url"]
vumiresponse = sender.send_voice(
voice_to_addr_formatter(message.to_addr),
message.content,
speech_url=speech_url,
session_event="new",
)
log.info("Sent voice message to <%s>" % message.to_addr)
elif "image_url" in message.metadata:
# Image message
image_url = message.metadata["image_url"]
vumiresponse = sender.send_image(
text_to_addr_formatter(message.to_addr),
message.content,
image_url=image_url,
)
log.info("Sent image message to <%s>" % (message.to_addr,))
else:
# Plain content
vumiresponse = sender.send_text(
text_to_addr_formatter(message.to_addr),
message.content,
metadata=message.metadata,
session_event="new",
)
log.info("Sent text message to <%s>" % (message.to_addr,))
message.last_sent_time = timezone.now()
message.attempts += 1
message.vumi_message_id = vumiresponse["message_id"]
message.save()
fire_metric.apply_async(
kwargs={"metric_name": "vumimessage.tries.sum", "metric_value": 1.0}
)
except requests_exceptions.ConnectionError as exc:
log.info("Connection Error sending message")
fire_metric.delay("sender.send_message.connection_error.sum", 1)
kwargs["error_retry_count"] = error_retry_count + 1
self.retry(
exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
)
except requests_exceptions.Timeout as exc:
log.info("Sending message failed due to timeout")
fire_metric.delay("sender.send_message.timeout.sum", 1)
kwargs["error_retry_count"] = error_retry_count + 1
self.retry(
exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
)
except (requests_exceptions.HTTPError, HTTPServiceError) as exc:
# retry message sending if in 500 range (3 default
# retries)
log.info(
"Sending message failed due to status: %s"
% exc.response.status_code
)
metric_name = (
"sender.send_message.http_error.%s.sum" % exc.response.status_code
)
fire_metric.delay(metric_name, 1)
kwargs["error_retry_count"] = error_retry_count + 1
self.retry(
exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
)
# If we've gotten this far the message send was successful.
fire_metric.apply_async(
kwargs={"metric_name": "message.sent.sum", "metric_value": 1.0}
)
return vumiresponse
else:
# This is for retries based on async nacks from the transport.
log.info("Message <%s> at max retries." % str(message_id))
message.to_addr = ""
message.save(update_fields=["to_addr"])
fire_metric.apply_async(
kwargs={
"metric_name": "vumimessage.maxretries.sum",
"metric_value": 1.0,
}
)
# Count failures on exhausted tries.
fire_metric.apply_async(
kwargs={"metric_name": "message.failures.sum", "metric_value": 1.0}
)
|
Load and contruct message and send them off
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/tasks.py#L225-L383
|
[
"def calculate_retry_delay(attempt, max_delay=300):\n \"\"\"Calculates an exponential backoff for retry attempts with a small\n amount of jitter.\"\"\"\n delay = int(random.uniform(2, 4) ** attempt)\n if delay > max_delay:\n # After reaching the max delay, stop using expontential growth\n # and keep the delay nearby the max.\n delay = int(random.uniform(max_delay - 20, max_delay + 20))\n return delay\n"
] |
class SendMessage(Task):
"""
Task to load and contruct message and send them off
"""
name = "message_sender.tasks.send_message"
default_retry_delay = 5
max_retries = None
max_error_retries = 5
class FailedEventRequest(Exception):
"""
The attempted task failed because of a non-200 HTTP return
code.
"""
def get_client(self, channel=None):
return MessageClientFactory.create(channel)
def fire_failed_msisdn_lookup(self, to_identity):
"""
Fires a webhook in the event of a None to_addr.
"""
payload = {"to_identity": to_identity}
hooks = Hook.objects.filter(event="identity.no_address")
for hook in hooks:
hook.deliver_hook(
None, payload_override={"hook": hook.dict(), "data": payload}
)
def on_failure(self, exc, task_id, args, kwargs, einfo):
error_retry_count = kwargs.get("error_retry_count", 0)
if error_retry_count == self.max_error_retries:
if "message_id" in kwargs:
message_id = kwargs["message_id"]
else:
message_id = args[0]
OutboundSendFailure.objects.create(
outbound_id=message_id,
initiated_at=self.request.eta,
reason=einfo.exception.message,
task_id=task_id,
)
# Count permanent failures.
fire_metric.apply_async(
kwargs={"metric_name": "message.failures.sum", "metric_value": 1.0}
)
super(SendMessage, self).on_failure(exc, task_id, args, kwargs, einfo)
|
praekeltfoundation/seed-message-sender
|
message_sender/tasks.py
|
ArchiveOutboundMessages.dump_data
|
python
|
def dump_data(self, filename, queryset):
with gzip.open(filename, "wb") as f:
for outbound in queryset.iterator():
data = OutboundArchiveSerializer(outbound).data
data = JSONRenderer().render(data)
f.write(data)
f.write("\n".encode("utf-8"))
|
Serializes the queryset into a newline separated JSON format, and
places it into a gzipped file
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/tasks.py#L486-L496
| null |
class ArchiveOutboundMessages(Task):
"""
Task to archive the outbound messages and store the messages in the
storage backend
"""
name = "message_sender.tasks.archive_outbounds"
def filename(self, date):
"""
Returns the filename for the provided date
"""
return "outbounds-{}.gz".format(date.strftime("%Y-%m-%d"))
def create_archived_outbound(self, date, filename):
"""
Creates the required ArchivedOutbound entry with the file specified
at `filename`
"""
with open(filename, "rb") as f:
f = File(f)
ArchivedOutbounds.objects.create(date=date, archive=f)
def run(self, start_date, end_date):
start_date = datetime.strptime(start_date, "%Y-%m-%d").replace(tzinfo=pytz.UTC)
end_date = datetime.strptime(end_date, "%Y-%m-%d").replace(tzinfo=pytz.UTC)
for d in daterange(start_date, end_date):
if ArchivedOutbounds.objects.filter(date=d.date()).exists():
continue
query = Outbound.objects.filter(
created_at__gte=d, created_at__lt=(d + timedelta(1))
)
if not query.exists():
continue
filename = self.filename(d)
self.dump_data(filename, query)
self.create_archived_outbound(d, filename)
os.remove(filename)
# Remove the post_delete hook from rest_hooks, otherwise we'll have
# to load all of the outbounds into memory
post_delete.disconnect(
receiver=model_deleted, dispatch_uid="instance-deleted-hook"
)
query.delete()
post_delete.connect(
receiver=model_deleted, dispatch_uid="instance-deleted-hook"
)
|
praekeltfoundation/seed-message-sender
|
message_sender/tasks.py
|
ArchiveOutboundMessages.create_archived_outbound
|
python
|
def create_archived_outbound(self, date, filename):
with open(filename, "rb") as f:
f = File(f)
ArchivedOutbounds.objects.create(date=date, archive=f)
|
Creates the required ArchivedOutbound entry with the file specified
at `filename`
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/tasks.py#L498-L505
| null |
class ArchiveOutboundMessages(Task):
"""
Task to archive the outbound messages and store the messages in the
storage backend
"""
name = "message_sender.tasks.archive_outbounds"
def filename(self, date):
"""
Returns the filename for the provided date
"""
return "outbounds-{}.gz".format(date.strftime("%Y-%m-%d"))
def dump_data(self, filename, queryset):
"""
Serializes the queryset into a newline separated JSON format, and
places it into a gzipped file
"""
with gzip.open(filename, "wb") as f:
for outbound in queryset.iterator():
data = OutboundArchiveSerializer(outbound).data
data = JSONRenderer().render(data)
f.write(data)
f.write("\n".encode("utf-8"))
def run(self, start_date, end_date):
start_date = datetime.strptime(start_date, "%Y-%m-%d").replace(tzinfo=pytz.UTC)
end_date = datetime.strptime(end_date, "%Y-%m-%d").replace(tzinfo=pytz.UTC)
for d in daterange(start_date, end_date):
if ArchivedOutbounds.objects.filter(date=d.date()).exists():
continue
query = Outbound.objects.filter(
created_at__gte=d, created_at__lt=(d + timedelta(1))
)
if not query.exists():
continue
filename = self.filename(d)
self.dump_data(filename, query)
self.create_archived_outbound(d, filename)
os.remove(filename)
# Remove the post_delete hook from rest_hooks, otherwise we'll have
# to load all of the outbounds into memory
post_delete.disconnect(
receiver=model_deleted, dispatch_uid="instance-deleted-hook"
)
query.delete()
post_delete.connect(
receiver=model_deleted, dispatch_uid="instance-deleted-hook"
)
|
praekeltfoundation/seed-message-sender
|
message_sender/formatters.py
|
e_164
|
python
|
def e_164(msisdn: str) -> str:
# Phonenumbers library requires the + to identify the country, so we add it if it
# does not already exist
number = phonenumbers.parse("+{}".format(msisdn.lstrip("+")), None)
return phonenumbers.format_number(number, phonenumbers.PhoneNumberFormat.E164)
|
Returns the msisdn in E.164 international format.
|
train
|
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/formatters.py#L31-L38
| null |
import re
import phonenumbers
def noop(msisdn):
return msisdn
def vas2nets_voice(msisdn):
"""
FIXME: this should not need be in this repo
Vas2Nets is an aggregator in Nigeria, for some reason they need
MSISDNs prefixed with a 9 instead of the country code to initiate an OBD.
"""
return re.sub(r"\+?234(\d+)$", r"90\1", msisdn)
def vas2nets_text(msisdn):
"""
FIXME: this should not need be in this repo
Vas2Nets is an aggregator in Nigeria, they need MSISDNs in the local
format, prefixed with a 0, instead of the international format with the
country code.
"""
return re.sub(r"\+234(\d+)$", r"234\1", msisdn)
|
smnorris/bcdata
|
bcdata/cli.py
|
parse_db_url
|
python
|
def parse_db_url(db_url):
u = urlparse(db_url)
db = {}
db["database"] = u.path[1:]
db["user"] = u.username
db["password"] = u.password
db["host"] = u.hostname
db["port"] = u.port
return db
|
provided a db url, return a dict with connection properties
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/cli.py#L27-L37
| null |
import json
import logging
import math
import os
import re
import subprocess
from urllib.parse import urlencode
from urllib.parse import urlparse
from functools import partial
from multiprocessing.dummy import Pool
from subprocess import call
import click
from cligj import indent_opt
from cligj import compact_opt
from owslib.wfs import WebFeatureService
import pgdata
import bcdata
bcdata.configure_logging()
log = logging.getLogger(__name__)
def get_objects(ctx, args, incomplete):
return [k for k in bcdata.list_tables() if incomplete in k]
# bounds handling direct from rasterio
# https://github.com/mapbox/rasterio/blob/master/rasterio/rio/options.py
# https://github.com/mapbox/rasterio/blob/master/rasterio/rio/clip.py
def from_like_context(ctx, param, value):
"""Return the value for an option from the context if the option
or `--all` is given, else return None."""
if ctx.obj and ctx.obj.get("like") and (value == "like" or ctx.obj.get("all_like")):
return ctx.obj["like"][param.name]
else:
return None
def bounds_handler(ctx, param, value):
"""Handle different forms of bounds."""
retval = from_like_context(ctx, param, value)
if retval is None and value is not None:
try:
value = value.strip(", []")
retval = tuple(float(x) for x in re.split(r"[,\s]+", value))
assert len(retval) == 4
return retval
except Exception:
raise click.BadParameter(
"{0!r} is not a valid bounding box representation".format(value)
)
else: # pragma: no cover
return retval
bounds_opt = click.option(
"--bounds",
default=None,
callback=bounds_handler,
help='Bounds: "left bottom right top" or "[left, bottom, right, top]".',
)
bounds_opt_required = click.option(
"--bounds",
required=True,
default=None,
callback=bounds_handler,
help='Bounds: "left bottom right top" or "[left, bottom, right, top]".',
)
dst_crs_opt = click.option("--dst-crs", "--dst_crs", help="Destination CRS.")
@click.group()
def cli():
pass
@cli.command()
@click.option("--refresh", "-r", is_flag=True, help="Refresh the cached list")
def list(refresh):
"""List DataBC layers available via WFS
"""
# This works too, but is much slower:
# ogrinfo WFS:http://openmaps.gov.bc.ca/geo/ows?VERSION=1.1.0
for table in bcdata.list_tables(refresh):
click.echo(table)
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@indent_opt
# Options to pick out a single metadata item and print it as
# a string.
@click.option(
"--count", "meta_member", flag_value="count", help="Print the count of features."
)
@click.option(
"--name", "meta_member", flag_value="name", help="Print the datasource's name."
)
def info(dataset, indent, meta_member):
"""Print basic metadata about a DataBC WFS layer as JSON.
Optionally print a single metadata item as a string.
"""
table = bcdata.validate_name(dataset)
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
info = {}
info["name"] = table
info["count"] = bcdata.get_count(table)
info["schema"] = wfs.get_schema("pub:" + table)
if meta_member:
click.echo(info[meta_member])
else:
click.echo(json.dumps(info, indent=indent))
@cli.command()
@click.option("--out_file", "-o", help="Output file", default="dem25.tif")
@bounds_opt_required
@dst_crs_opt
@click.option("--src-crs", "--src_crs", help="CRS of provided bounds", default="EPSG:3005")
@click.option("--resolution", "-r", type=int, default=25)
def dem(bounds, src_crs, dst_crs, out_file, resolution):
"""Dump BC DEM to TIFF
"""
if not dst_crs:
dst_crs = "EPSG:3005"
bcdata.get_dem(bounds, out_file=out_file, src_crs=src_crs, dst_crs=dst_crs, resolution=resolution)
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--query",
help="A valid CQL or ECQL query, quote enclosed (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@click.option("--out_file", "-o", help="Output file")
@bounds_opt
def dump(dataset, query, out_file, bounds):
"""Write DataBC features to stdout as GeoJSON feature collection.
\b
$ bcdata dump bc-airports
$ bcdata dump bc-airports --query "AIRPORT_NAME='Victoria Harbour (Shoal Point) Heliport'"
$ bcdata dump bc-airports --bounds xmin ymin xmax ymax
The values of --bounds must be in BC Albers.
It can also be combined to read bounds of a feature dataset using Fiona:
\b
$ bcdata dump bc-airports --bounds $(fio info aoi.shp --bounds)
"""
table = bcdata.validate_name(dataset)
data = bcdata.get_data(table, query=query, bounds=bounds)
if out_file:
with open(out_file, "w") as f:
json.dump(data.json(), f)
else:
sink = click.get_text_stream("stdout")
sink.write(json.dumps(data))
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--query",
help="A valid `CQL` or `ECQL` query (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@bounds_opt
@indent_opt
@compact_opt
@dst_crs_opt
@click.option(
"--pagesize", "-p", default=10000, help="Max number of records to request"
)
@click.option("--sortby", "-s", help="Name of sort field")
def cat(dataset, query, bounds, indent, compact, dst_crs, pagesize, sortby):
"""Write DataBC features to stdout as GeoJSON feature objects.
"""
# Note that cat does not concatenate!
dump_kwds = {"sort_keys": True}
if indent:
dump_kwds["indent"] = indent
if compact:
dump_kwds["separators"] = (",", ":")
table = bcdata.validate_name(dataset)
for feat in bcdata.get_features(
table, query=query, bounds=bounds, sortby=sortby, crs=dst_crs
):
click.echo(json.dumps(feat, **dump_kwds))
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--db_url",
"-db",
help="SQLAlchemy database url",
default=os.environ.get("DATABASE_URL"),
)
@click.option("--table", help="Destination table name")
@click.option("--schema", help="Destination schema name")
@click.option(
"--query",
help="A valid `CQL` or `ECQL` query (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@click.option("--append", is_flag=True, help="Append to existing table")
@click.option(
"--pagesize", "-p", default=10000, help="Max number of records to request"
)
@click.option("--sortby", "-s", help="Name of sort field")
@click.option(
"--max_workers", "-w", default=5, help="Max number of concurrent requests"
)
def bc2pg(dataset, db_url, table, schema, query, append, pagesize, sortby, max_workers):
"""Download a DataBC WFS layer to postgres - an ogr2ogr wrapper.
\b
$ bcdata bc2pg bc-airports --db_url postgresql://postgres:postgres@localhost:5432/postgis
The default target database can be specified by setting the $DATABASE_URL
environment variable.
https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls
"""
src = bcdata.validate_name(dataset)
src_schema, src_table = [i.lower() for i in src.split(".")]
if not schema:
schema = src_schema
if not table:
table = src_table
# create schema if it does not exist
conn = pgdata.connect(db_url)
if schema not in conn.schemas:
click.echo("Schema {} does not exist, creating it".format(schema))
conn.create_schema(schema)
# build parameters for each required request
param_dicts = bcdata.define_request(
dataset, query=query, sortby=sortby, pagesize=pagesize
)
try:
# run the first request / load
payload = urlencode(param_dicts[0], doseq=True)
url = bcdata.WFS_URL + "?" + payload
db = parse_db_url(db_url)
db_string = "PG:host={h} user={u} dbname={db} password={pwd}".format(
h=db["host"], u=db["user"], db=db["database"], pwd=db["password"]
)
# create the table
if not append:
command = [
"ogr2ogr",
"-lco",
"OVERWRITE=YES",
"-lco",
"SCHEMA={}".format(schema),
"-lco",
"GEOMETRY_NAME=geom",
"-f",
"PostgreSQL",
db_string,
"-t_srs",
"EPSG:3005",
"-nln",
table,
url,
]
click.echo(" ".join(command))
subprocess.run(command)
# append to table when append specified or processing many chunks
if len(param_dicts) > 1 or append:
# define starting index in list of requests
if append:
idx = 0
else:
idx = 1
commands = []
for chunk, paramdict in enumerate(param_dicts[idx:]):
payload = urlencode(paramdict, doseq=True)
url = bcdata.WFS_URL + "?" + payload
command = [
"ogr2ogr",
"-update",
"-append",
"-f",
"PostgreSQL",
db_string + " active_schema=" + schema,
"-t_srs",
"EPSG:3005",
"-nln",
table,
url,
]
commands.append(command)
# https://stackoverflow.com/questions/14533458
pool = Pool(max_workers)
with click.progressbar(
pool.imap(partial(call), commands), length=len(param_dicts)
) as bar:
for returncode in bar:
if returncode != 0:
click.echo("Command failed: {}".format(returncode))
click.echo(
"Load of {} to {} in {} complete".format(src, schema + "." + table, db_url)
)
except Exception:
click.echo("Data load failed")
raise click.Abort()
|
smnorris/bcdata
|
bcdata/cli.py
|
from_like_context
|
python
|
def from_like_context(ctx, param, value):
if ctx.obj and ctx.obj.get("like") and (value == "like" or ctx.obj.get("all_like")):
return ctx.obj["like"][param.name]
else:
return None
|
Return the value for an option from the context if the option
or `--all` is given, else return None.
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/cli.py#L49-L55
| null |
import json
import logging
import math
import os
import re
import subprocess
from urllib.parse import urlencode
from urllib.parse import urlparse
from functools import partial
from multiprocessing.dummy import Pool
from subprocess import call
import click
from cligj import indent_opt
from cligj import compact_opt
from owslib.wfs import WebFeatureService
import pgdata
import bcdata
bcdata.configure_logging()
log = logging.getLogger(__name__)
def parse_db_url(db_url):
"""provided a db url, return a dict with connection properties
"""
u = urlparse(db_url)
db = {}
db["database"] = u.path[1:]
db["user"] = u.username
db["password"] = u.password
db["host"] = u.hostname
db["port"] = u.port
return db
def get_objects(ctx, args, incomplete):
return [k for k in bcdata.list_tables() if incomplete in k]
# bounds handling direct from rasterio
# https://github.com/mapbox/rasterio/blob/master/rasterio/rio/options.py
# https://github.com/mapbox/rasterio/blob/master/rasterio/rio/clip.py
def bounds_handler(ctx, param, value):
"""Handle different forms of bounds."""
retval = from_like_context(ctx, param, value)
if retval is None and value is not None:
try:
value = value.strip(", []")
retval = tuple(float(x) for x in re.split(r"[,\s]+", value))
assert len(retval) == 4
return retval
except Exception:
raise click.BadParameter(
"{0!r} is not a valid bounding box representation".format(value)
)
else: # pragma: no cover
return retval
bounds_opt = click.option(
"--bounds",
default=None,
callback=bounds_handler,
help='Bounds: "left bottom right top" or "[left, bottom, right, top]".',
)
bounds_opt_required = click.option(
"--bounds",
required=True,
default=None,
callback=bounds_handler,
help='Bounds: "left bottom right top" or "[left, bottom, right, top]".',
)
dst_crs_opt = click.option("--dst-crs", "--dst_crs", help="Destination CRS.")
@click.group()
def cli():
pass
@cli.command()
@click.option("--refresh", "-r", is_flag=True, help="Refresh the cached list")
def list(refresh):
"""List DataBC layers available via WFS
"""
# This works too, but is much slower:
# ogrinfo WFS:http://openmaps.gov.bc.ca/geo/ows?VERSION=1.1.0
for table in bcdata.list_tables(refresh):
click.echo(table)
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@indent_opt
# Options to pick out a single metadata item and print it as
# a string.
@click.option(
"--count", "meta_member", flag_value="count", help="Print the count of features."
)
@click.option(
"--name", "meta_member", flag_value="name", help="Print the datasource's name."
)
def info(dataset, indent, meta_member):
"""Print basic metadata about a DataBC WFS layer as JSON.
Optionally print a single metadata item as a string.
"""
table = bcdata.validate_name(dataset)
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
info = {}
info["name"] = table
info["count"] = bcdata.get_count(table)
info["schema"] = wfs.get_schema("pub:" + table)
if meta_member:
click.echo(info[meta_member])
else:
click.echo(json.dumps(info, indent=indent))
@cli.command()
@click.option("--out_file", "-o", help="Output file", default="dem25.tif")
@bounds_opt_required
@dst_crs_opt
@click.option("--src-crs", "--src_crs", help="CRS of provided bounds", default="EPSG:3005")
@click.option("--resolution", "-r", type=int, default=25)
def dem(bounds, src_crs, dst_crs, out_file, resolution):
"""Dump BC DEM to TIFF
"""
if not dst_crs:
dst_crs = "EPSG:3005"
bcdata.get_dem(bounds, out_file=out_file, src_crs=src_crs, dst_crs=dst_crs, resolution=resolution)
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--query",
help="A valid CQL or ECQL query, quote enclosed (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@click.option("--out_file", "-o", help="Output file")
@bounds_opt
def dump(dataset, query, out_file, bounds):
"""Write DataBC features to stdout as GeoJSON feature collection.
\b
$ bcdata dump bc-airports
$ bcdata dump bc-airports --query "AIRPORT_NAME='Victoria Harbour (Shoal Point) Heliport'"
$ bcdata dump bc-airports --bounds xmin ymin xmax ymax
The values of --bounds must be in BC Albers.
It can also be combined to read bounds of a feature dataset using Fiona:
\b
$ bcdata dump bc-airports --bounds $(fio info aoi.shp --bounds)
"""
table = bcdata.validate_name(dataset)
data = bcdata.get_data(table, query=query, bounds=bounds)
if out_file:
with open(out_file, "w") as f:
json.dump(data.json(), f)
else:
sink = click.get_text_stream("stdout")
sink.write(json.dumps(data))
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--query",
help="A valid `CQL` or `ECQL` query (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@bounds_opt
@indent_opt
@compact_opt
@dst_crs_opt
@click.option(
"--pagesize", "-p", default=10000, help="Max number of records to request"
)
@click.option("--sortby", "-s", help="Name of sort field")
def cat(dataset, query, bounds, indent, compact, dst_crs, pagesize, sortby):
"""Write DataBC features to stdout as GeoJSON feature objects.
"""
# Note that cat does not concatenate!
dump_kwds = {"sort_keys": True}
if indent:
dump_kwds["indent"] = indent
if compact:
dump_kwds["separators"] = (",", ":")
table = bcdata.validate_name(dataset)
for feat in bcdata.get_features(
table, query=query, bounds=bounds, sortby=sortby, crs=dst_crs
):
click.echo(json.dumps(feat, **dump_kwds))
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--db_url",
"-db",
help="SQLAlchemy database url",
default=os.environ.get("DATABASE_URL"),
)
@click.option("--table", help="Destination table name")
@click.option("--schema", help="Destination schema name")
@click.option(
"--query",
help="A valid `CQL` or `ECQL` query (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@click.option("--append", is_flag=True, help="Append to existing table")
@click.option(
"--pagesize", "-p", default=10000, help="Max number of records to request"
)
@click.option("--sortby", "-s", help="Name of sort field")
@click.option(
"--max_workers", "-w", default=5, help="Max number of concurrent requests"
)
def bc2pg(dataset, db_url, table, schema, query, append, pagesize, sortby, max_workers):
"""Download a DataBC WFS layer to postgres - an ogr2ogr wrapper.
\b
$ bcdata bc2pg bc-airports --db_url postgresql://postgres:postgres@localhost:5432/postgis
The default target database can be specified by setting the $DATABASE_URL
environment variable.
https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls
"""
src = bcdata.validate_name(dataset)
src_schema, src_table = [i.lower() for i in src.split(".")]
if not schema:
schema = src_schema
if not table:
table = src_table
# create schema if it does not exist
conn = pgdata.connect(db_url)
if schema not in conn.schemas:
click.echo("Schema {} does not exist, creating it".format(schema))
conn.create_schema(schema)
# build parameters for each required request
param_dicts = bcdata.define_request(
dataset, query=query, sortby=sortby, pagesize=pagesize
)
try:
# run the first request / load
payload = urlencode(param_dicts[0], doseq=True)
url = bcdata.WFS_URL + "?" + payload
db = parse_db_url(db_url)
db_string = "PG:host={h} user={u} dbname={db} password={pwd}".format(
h=db["host"], u=db["user"], db=db["database"], pwd=db["password"]
)
# create the table
if not append:
command = [
"ogr2ogr",
"-lco",
"OVERWRITE=YES",
"-lco",
"SCHEMA={}".format(schema),
"-lco",
"GEOMETRY_NAME=geom",
"-f",
"PostgreSQL",
db_string,
"-t_srs",
"EPSG:3005",
"-nln",
table,
url,
]
click.echo(" ".join(command))
subprocess.run(command)
# append to table when append specified or processing many chunks
if len(param_dicts) > 1 or append:
# define starting index in list of requests
if append:
idx = 0
else:
idx = 1
commands = []
for chunk, paramdict in enumerate(param_dicts[idx:]):
payload = urlencode(paramdict, doseq=True)
url = bcdata.WFS_URL + "?" + payload
command = [
"ogr2ogr",
"-update",
"-append",
"-f",
"PostgreSQL",
db_string + " active_schema=" + schema,
"-t_srs",
"EPSG:3005",
"-nln",
table,
url,
]
commands.append(command)
# https://stackoverflow.com/questions/14533458
pool = Pool(max_workers)
with click.progressbar(
pool.imap(partial(call), commands), length=len(param_dicts)
) as bar:
for returncode in bar:
if returncode != 0:
click.echo("Command failed: {}".format(returncode))
click.echo(
"Load of {} to {} in {} complete".format(src, schema + "." + table, db_url)
)
except Exception:
click.echo("Data load failed")
raise click.Abort()
|
smnorris/bcdata
|
bcdata/cli.py
|
bounds_handler
|
python
|
def bounds_handler(ctx, param, value):
retval = from_like_context(ctx, param, value)
if retval is None and value is not None:
try:
value = value.strip(", []")
retval = tuple(float(x) for x in re.split(r"[,\s]+", value))
assert len(retval) == 4
return retval
except Exception:
raise click.BadParameter(
"{0!r} is not a valid bounding box representation".format(value)
)
else: # pragma: no cover
return retval
|
Handle different forms of bounds.
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/cli.py#L58-L72
|
[
"def from_like_context(ctx, param, value):\n \"\"\"Return the value for an option from the context if the option\n or `--all` is given, else return None.\"\"\"\n if ctx.obj and ctx.obj.get(\"like\") and (value == \"like\" or ctx.obj.get(\"all_like\")):\n return ctx.obj[\"like\"][param.name]\n else:\n return None\n"
] |
import json
import logging
import math
import os
import re
import subprocess
from urllib.parse import urlencode
from urllib.parse import urlparse
from functools import partial
from multiprocessing.dummy import Pool
from subprocess import call
import click
from cligj import indent_opt
from cligj import compact_opt
from owslib.wfs import WebFeatureService
import pgdata
import bcdata
bcdata.configure_logging()
log = logging.getLogger(__name__)
def parse_db_url(db_url):
"""provided a db url, return a dict with connection properties
"""
u = urlparse(db_url)
db = {}
db["database"] = u.path[1:]
db["user"] = u.username
db["password"] = u.password
db["host"] = u.hostname
db["port"] = u.port
return db
def get_objects(ctx, args, incomplete):
return [k for k in bcdata.list_tables() if incomplete in k]
# bounds handling direct from rasterio
# https://github.com/mapbox/rasterio/blob/master/rasterio/rio/options.py
# https://github.com/mapbox/rasterio/blob/master/rasterio/rio/clip.py
def from_like_context(ctx, param, value):
"""Return the value for an option from the context if the option
or `--all` is given, else return None."""
if ctx.obj and ctx.obj.get("like") and (value == "like" or ctx.obj.get("all_like")):
return ctx.obj["like"][param.name]
else:
return None
bounds_opt = click.option(
"--bounds",
default=None,
callback=bounds_handler,
help='Bounds: "left bottom right top" or "[left, bottom, right, top]".',
)
bounds_opt_required = click.option(
"--bounds",
required=True,
default=None,
callback=bounds_handler,
help='Bounds: "left bottom right top" or "[left, bottom, right, top]".',
)
dst_crs_opt = click.option("--dst-crs", "--dst_crs", help="Destination CRS.")
@click.group()
def cli():
pass
@cli.command()
@click.option("--refresh", "-r", is_flag=True, help="Refresh the cached list")
def list(refresh):
"""List DataBC layers available via WFS
"""
# This works too, but is much slower:
# ogrinfo WFS:http://openmaps.gov.bc.ca/geo/ows?VERSION=1.1.0
for table in bcdata.list_tables(refresh):
click.echo(table)
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@indent_opt
# Options to pick out a single metadata item and print it as
# a string.
@click.option(
"--count", "meta_member", flag_value="count", help="Print the count of features."
)
@click.option(
"--name", "meta_member", flag_value="name", help="Print the datasource's name."
)
def info(dataset, indent, meta_member):
"""Print basic metadata about a DataBC WFS layer as JSON.
Optionally print a single metadata item as a string.
"""
table = bcdata.validate_name(dataset)
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
info = {}
info["name"] = table
info["count"] = bcdata.get_count(table)
info["schema"] = wfs.get_schema("pub:" + table)
if meta_member:
click.echo(info[meta_member])
else:
click.echo(json.dumps(info, indent=indent))
@cli.command()
@click.option("--out_file", "-o", help="Output file", default="dem25.tif")
@bounds_opt_required
@dst_crs_opt
@click.option("--src-crs", "--src_crs", help="CRS of provided bounds", default="EPSG:3005")
@click.option("--resolution", "-r", type=int, default=25)
def dem(bounds, src_crs, dst_crs, out_file, resolution):
"""Dump BC DEM to TIFF
"""
if not dst_crs:
dst_crs = "EPSG:3005"
bcdata.get_dem(bounds, out_file=out_file, src_crs=src_crs, dst_crs=dst_crs, resolution=resolution)
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--query",
help="A valid CQL or ECQL query, quote enclosed (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@click.option("--out_file", "-o", help="Output file")
@bounds_opt
def dump(dataset, query, out_file, bounds):
"""Write DataBC features to stdout as GeoJSON feature collection.
\b
$ bcdata dump bc-airports
$ bcdata dump bc-airports --query "AIRPORT_NAME='Victoria Harbour (Shoal Point) Heliport'"
$ bcdata dump bc-airports --bounds xmin ymin xmax ymax
The values of --bounds must be in BC Albers.
It can also be combined to read bounds of a feature dataset using Fiona:
\b
$ bcdata dump bc-airports --bounds $(fio info aoi.shp --bounds)
"""
table = bcdata.validate_name(dataset)
data = bcdata.get_data(table, query=query, bounds=bounds)
if out_file:
with open(out_file, "w") as f:
json.dump(data.json(), f)
else:
sink = click.get_text_stream("stdout")
sink.write(json.dumps(data))
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--query",
help="A valid `CQL` or `ECQL` query (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@bounds_opt
@indent_opt
@compact_opt
@dst_crs_opt
@click.option(
"--pagesize", "-p", default=10000, help="Max number of records to request"
)
@click.option("--sortby", "-s", help="Name of sort field")
def cat(dataset, query, bounds, indent, compact, dst_crs, pagesize, sortby):
"""Write DataBC features to stdout as GeoJSON feature objects.
"""
# Note that cat does not concatenate!
dump_kwds = {"sort_keys": True}
if indent:
dump_kwds["indent"] = indent
if compact:
dump_kwds["separators"] = (",", ":")
table = bcdata.validate_name(dataset)
for feat in bcdata.get_features(
table, query=query, bounds=bounds, sortby=sortby, crs=dst_crs
):
click.echo(json.dumps(feat, **dump_kwds))
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--db_url",
"-db",
help="SQLAlchemy database url",
default=os.environ.get("DATABASE_URL"),
)
@click.option("--table", help="Destination table name")
@click.option("--schema", help="Destination schema name")
@click.option(
"--query",
help="A valid `CQL` or `ECQL` query (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@click.option("--append", is_flag=True, help="Append to existing table")
@click.option(
"--pagesize", "-p", default=10000, help="Max number of records to request"
)
@click.option("--sortby", "-s", help="Name of sort field")
@click.option(
"--max_workers", "-w", default=5, help="Max number of concurrent requests"
)
def bc2pg(dataset, db_url, table, schema, query, append, pagesize, sortby, max_workers):
"""Download a DataBC WFS layer to postgres - an ogr2ogr wrapper.
\b
$ bcdata bc2pg bc-airports --db_url postgresql://postgres:postgres@localhost:5432/postgis
The default target database can be specified by setting the $DATABASE_URL
environment variable.
https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls
"""
src = bcdata.validate_name(dataset)
src_schema, src_table = [i.lower() for i in src.split(".")]
if not schema:
schema = src_schema
if not table:
table = src_table
# create schema if it does not exist
conn = pgdata.connect(db_url)
if schema not in conn.schemas:
click.echo("Schema {} does not exist, creating it".format(schema))
conn.create_schema(schema)
# build parameters for each required request
param_dicts = bcdata.define_request(
dataset, query=query, sortby=sortby, pagesize=pagesize
)
try:
# run the first request / load
payload = urlencode(param_dicts[0], doseq=True)
url = bcdata.WFS_URL + "?" + payload
db = parse_db_url(db_url)
db_string = "PG:host={h} user={u} dbname={db} password={pwd}".format(
h=db["host"], u=db["user"], db=db["database"], pwd=db["password"]
)
# create the table
if not append:
command = [
"ogr2ogr",
"-lco",
"OVERWRITE=YES",
"-lco",
"SCHEMA={}".format(schema),
"-lco",
"GEOMETRY_NAME=geom",
"-f",
"PostgreSQL",
db_string,
"-t_srs",
"EPSG:3005",
"-nln",
table,
url,
]
click.echo(" ".join(command))
subprocess.run(command)
# append to table when append specified or processing many chunks
if len(param_dicts) > 1 or append:
# define starting index in list of requests
if append:
idx = 0
else:
idx = 1
commands = []
for chunk, paramdict in enumerate(param_dicts[idx:]):
payload = urlencode(paramdict, doseq=True)
url = bcdata.WFS_URL + "?" + payload
command = [
"ogr2ogr",
"-update",
"-append",
"-f",
"PostgreSQL",
db_string + " active_schema=" + schema,
"-t_srs",
"EPSG:3005",
"-nln",
table,
url,
]
commands.append(command)
# https://stackoverflow.com/questions/14533458
pool = Pool(max_workers)
with click.progressbar(
pool.imap(partial(call), commands), length=len(param_dicts)
) as bar:
for returncode in bar:
if returncode != 0:
click.echo("Command failed: {}".format(returncode))
click.echo(
"Load of {} to {} in {} complete".format(src, schema + "." + table, db_url)
)
except Exception:
click.echo("Data load failed")
raise click.Abort()
|
smnorris/bcdata
|
bcdata/cli.py
|
info
|
python
|
def info(dataset, indent, meta_member):
table = bcdata.validate_name(dataset)
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
info = {}
info["name"] = table
info["count"] = bcdata.get_count(table)
info["schema"] = wfs.get_schema("pub:" + table)
if meta_member:
click.echo(info[meta_member])
else:
click.echo(json.dumps(info, indent=indent))
|
Print basic metadata about a DataBC WFS layer as JSON.
Optionally print a single metadata item as a string.
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/cli.py#L120-L134
|
[
"def get_count(dataset, query=None):\n \"\"\"Ask DataBC WFS how many features there are in a table/query\n \"\"\"\n # https://gis.stackexchange.com/questions/45101/only-return-the-numberoffeatures-in-a-wfs-query\n table = validate_name(dataset)\n payload = {\n \"service\": \"WFS\",\n \"version\": \"2.0.0\",\n \"request\": \"GetFeature\",\n \"typeName\": table,\n \"resultType\": \"hits\",\n \"outputFormat\": \"json\",\n }\n if query:\n payload[\"CQL_FILTER\"] = query\n r = requests.get(bcdata.WFS_URL, params=payload)\n return int(ET.fromstring(r.text).attrib[\"numberMatched\"])\n",
"def validate_name(dataset):\n \"\"\"Check wfs/cache and the bcdc api to see if dataset name is valid\n \"\"\"\n if dataset in list_tables():\n return dataset\n else:\n return bcdc_package_show(dataset)[\"object_name\"]\n"
] |
import json
import logging
import math
import os
import re
import subprocess
from urllib.parse import urlencode
from urllib.parse import urlparse
from functools import partial
from multiprocessing.dummy import Pool
from subprocess import call
import click
from cligj import indent_opt
from cligj import compact_opt
from owslib.wfs import WebFeatureService
import pgdata
import bcdata
bcdata.configure_logging()
log = logging.getLogger(__name__)
def parse_db_url(db_url):
"""provided a db url, return a dict with connection properties
"""
u = urlparse(db_url)
db = {}
db["database"] = u.path[1:]
db["user"] = u.username
db["password"] = u.password
db["host"] = u.hostname
db["port"] = u.port
return db
def get_objects(ctx, args, incomplete):
return [k for k in bcdata.list_tables() if incomplete in k]
# bounds handling direct from rasterio
# https://github.com/mapbox/rasterio/blob/master/rasterio/rio/options.py
# https://github.com/mapbox/rasterio/blob/master/rasterio/rio/clip.py
def from_like_context(ctx, param, value):
"""Return the value for an option from the context if the option
or `--all` is given, else return None."""
if ctx.obj and ctx.obj.get("like") and (value == "like" or ctx.obj.get("all_like")):
return ctx.obj["like"][param.name]
else:
return None
def bounds_handler(ctx, param, value):
"""Handle different forms of bounds."""
retval = from_like_context(ctx, param, value)
if retval is None and value is not None:
try:
value = value.strip(", []")
retval = tuple(float(x) for x in re.split(r"[,\s]+", value))
assert len(retval) == 4
return retval
except Exception:
raise click.BadParameter(
"{0!r} is not a valid bounding box representation".format(value)
)
else: # pragma: no cover
return retval
bounds_opt = click.option(
"--bounds",
default=None,
callback=bounds_handler,
help='Bounds: "left bottom right top" or "[left, bottom, right, top]".',
)
bounds_opt_required = click.option(
"--bounds",
required=True,
default=None,
callback=bounds_handler,
help='Bounds: "left bottom right top" or "[left, bottom, right, top]".',
)
dst_crs_opt = click.option("--dst-crs", "--dst_crs", help="Destination CRS.")
@click.group()
def cli():
pass
@cli.command()
@click.option("--refresh", "-r", is_flag=True, help="Refresh the cached list")
def list(refresh):
"""List DataBC layers available via WFS
"""
# This works too, but is much slower:
# ogrinfo WFS:http://openmaps.gov.bc.ca/geo/ows?VERSION=1.1.0
for table in bcdata.list_tables(refresh):
click.echo(table)
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@indent_opt
# Options to pick out a single metadata item and print it as
# a string.
@click.option(
"--count", "meta_member", flag_value="count", help="Print the count of features."
)
@click.option(
"--name", "meta_member", flag_value="name", help="Print the datasource's name."
)
@cli.command()
@click.option("--out_file", "-o", help="Output file", default="dem25.tif")
@bounds_opt_required
@dst_crs_opt
@click.option("--src-crs", "--src_crs", help="CRS of provided bounds", default="EPSG:3005")
@click.option("--resolution", "-r", type=int, default=25)
def dem(bounds, src_crs, dst_crs, out_file, resolution):
"""Dump BC DEM to TIFF
"""
if not dst_crs:
dst_crs = "EPSG:3005"
bcdata.get_dem(bounds, out_file=out_file, src_crs=src_crs, dst_crs=dst_crs, resolution=resolution)
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--query",
help="A valid CQL or ECQL query, quote enclosed (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@click.option("--out_file", "-o", help="Output file")
@bounds_opt
def dump(dataset, query, out_file, bounds):
"""Write DataBC features to stdout as GeoJSON feature collection.
\b
$ bcdata dump bc-airports
$ bcdata dump bc-airports --query "AIRPORT_NAME='Victoria Harbour (Shoal Point) Heliport'"
$ bcdata dump bc-airports --bounds xmin ymin xmax ymax
The values of --bounds must be in BC Albers.
It can also be combined to read bounds of a feature dataset using Fiona:
\b
$ bcdata dump bc-airports --bounds $(fio info aoi.shp --bounds)
"""
table = bcdata.validate_name(dataset)
data = bcdata.get_data(table, query=query, bounds=bounds)
if out_file:
with open(out_file, "w") as f:
json.dump(data.json(), f)
else:
sink = click.get_text_stream("stdout")
sink.write(json.dumps(data))
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--query",
help="A valid `CQL` or `ECQL` query (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@bounds_opt
@indent_opt
@compact_opt
@dst_crs_opt
@click.option(
"--pagesize", "-p", default=10000, help="Max number of records to request"
)
@click.option("--sortby", "-s", help="Name of sort field")
def cat(dataset, query, bounds, indent, compact, dst_crs, pagesize, sortby):
"""Write DataBC features to stdout as GeoJSON feature objects.
"""
# Note that cat does not concatenate!
dump_kwds = {"sort_keys": True}
if indent:
dump_kwds["indent"] = indent
if compact:
dump_kwds["separators"] = (",", ":")
table = bcdata.validate_name(dataset)
for feat in bcdata.get_features(
table, query=query, bounds=bounds, sortby=sortby, crs=dst_crs
):
click.echo(json.dumps(feat, **dump_kwds))
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--db_url",
"-db",
help="SQLAlchemy database url",
default=os.environ.get("DATABASE_URL"),
)
@click.option("--table", help="Destination table name")
@click.option("--schema", help="Destination schema name")
@click.option(
"--query",
help="A valid `CQL` or `ECQL` query (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@click.option("--append", is_flag=True, help="Append to existing table")
@click.option(
"--pagesize", "-p", default=10000, help="Max number of records to request"
)
@click.option("--sortby", "-s", help="Name of sort field")
@click.option(
"--max_workers", "-w", default=5, help="Max number of concurrent requests"
)
def bc2pg(dataset, db_url, table, schema, query, append, pagesize, sortby, max_workers):
"""Download a DataBC WFS layer to postgres - an ogr2ogr wrapper.
\b
$ bcdata bc2pg bc-airports --db_url postgresql://postgres:postgres@localhost:5432/postgis
The default target database can be specified by setting the $DATABASE_URL
environment variable.
https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls
"""
src = bcdata.validate_name(dataset)
src_schema, src_table = [i.lower() for i in src.split(".")]
if not schema:
schema = src_schema
if not table:
table = src_table
# create schema if it does not exist
conn = pgdata.connect(db_url)
if schema not in conn.schemas:
click.echo("Schema {} does not exist, creating it".format(schema))
conn.create_schema(schema)
# build parameters for each required request
param_dicts = bcdata.define_request(
dataset, query=query, sortby=sortby, pagesize=pagesize
)
try:
# run the first request / load
payload = urlencode(param_dicts[0], doseq=True)
url = bcdata.WFS_URL + "?" + payload
db = parse_db_url(db_url)
db_string = "PG:host={h} user={u} dbname={db} password={pwd}".format(
h=db["host"], u=db["user"], db=db["database"], pwd=db["password"]
)
# create the table
if not append:
command = [
"ogr2ogr",
"-lco",
"OVERWRITE=YES",
"-lco",
"SCHEMA={}".format(schema),
"-lco",
"GEOMETRY_NAME=geom",
"-f",
"PostgreSQL",
db_string,
"-t_srs",
"EPSG:3005",
"-nln",
table,
url,
]
click.echo(" ".join(command))
subprocess.run(command)
# append to table when append specified or processing many chunks
if len(param_dicts) > 1 or append:
# define starting index in list of requests
if append:
idx = 0
else:
idx = 1
commands = []
for chunk, paramdict in enumerate(param_dicts[idx:]):
payload = urlencode(paramdict, doseq=True)
url = bcdata.WFS_URL + "?" + payload
command = [
"ogr2ogr",
"-update",
"-append",
"-f",
"PostgreSQL",
db_string + " active_schema=" + schema,
"-t_srs",
"EPSG:3005",
"-nln",
table,
url,
]
commands.append(command)
# https://stackoverflow.com/questions/14533458
pool = Pool(max_workers)
with click.progressbar(
pool.imap(partial(call), commands), length=len(param_dicts)
) as bar:
for returncode in bar:
if returncode != 0:
click.echo("Command failed: {}".format(returncode))
click.echo(
"Load of {} to {} in {} complete".format(src, schema + "." + table, db_url)
)
except Exception:
click.echo("Data load failed")
raise click.Abort()
|
smnorris/bcdata
|
bcdata/cli.py
|
dem
|
python
|
def dem(bounds, src_crs, dst_crs, out_file, resolution):
if not dst_crs:
dst_crs = "EPSG:3005"
bcdata.get_dem(bounds, out_file=out_file, src_crs=src_crs, dst_crs=dst_crs, resolution=resolution)
|
Dump BC DEM to TIFF
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/cli.py#L143-L148
|
[
"def get_dem(bounds, out_file=\"dem.tif\", src_crs=\"EPSG:3005\", dst_crs=\"EPSG:3005\", resolution=25):\n \"\"\"Get 25m DEM for provided bounds, write to GeoTIFF\n \"\"\"\n bbox = \",\".join([str(b) for b in bounds])\n # todo: validate resolution units are equivalent to src_crs units\n # build request\n payload = {\n \"service\": \"WCS\",\n \"version\": \"1.0.0\",\n \"request\": \"GetCoverage\",\n \"coverage\": \"pub:bc_elevation_25m_bcalb\",\n \"Format\": \"GeoTIFF\",\n \"bbox\": bbox,\n \"CRS\": src_crs,\n \"RESPONSE_CRS\": dst_crs,\n \"resx\": str(resolution),\n \"resy\": str(resolution),\n }\n # request data from WCS\n r = requests.get(bcdata.WCS_URL, params=payload)\n # save to tiff\n if r.status_code == 200:\n with open(out_file, \"wb\") as file:\n file.write(r.content)\n return out_file\n else:\n raise RuntimeError(\n \"WCS request failed with status code {}\".format(str(r.status_code))\n )\n"
] |
import json
import logging
import math
import os
import re
import subprocess
from urllib.parse import urlencode
from urllib.parse import urlparse
from functools import partial
from multiprocessing.dummy import Pool
from subprocess import call
import click
from cligj import indent_opt
from cligj import compact_opt
from owslib.wfs import WebFeatureService
import pgdata
import bcdata
bcdata.configure_logging()
log = logging.getLogger(__name__)
def parse_db_url(db_url):
"""provided a db url, return a dict with connection properties
"""
u = urlparse(db_url)
db = {}
db["database"] = u.path[1:]
db["user"] = u.username
db["password"] = u.password
db["host"] = u.hostname
db["port"] = u.port
return db
def get_objects(ctx, args, incomplete):
return [k for k in bcdata.list_tables() if incomplete in k]
# bounds handling direct from rasterio
# https://github.com/mapbox/rasterio/blob/master/rasterio/rio/options.py
# https://github.com/mapbox/rasterio/blob/master/rasterio/rio/clip.py
def from_like_context(ctx, param, value):
"""Return the value for an option from the context if the option
or `--all` is given, else return None."""
if ctx.obj and ctx.obj.get("like") and (value == "like" or ctx.obj.get("all_like")):
return ctx.obj["like"][param.name]
else:
return None
def bounds_handler(ctx, param, value):
"""Handle different forms of bounds."""
retval = from_like_context(ctx, param, value)
if retval is None and value is not None:
try:
value = value.strip(", []")
retval = tuple(float(x) for x in re.split(r"[,\s]+", value))
assert len(retval) == 4
return retval
except Exception:
raise click.BadParameter(
"{0!r} is not a valid bounding box representation".format(value)
)
else: # pragma: no cover
return retval
bounds_opt = click.option(
"--bounds",
default=None,
callback=bounds_handler,
help='Bounds: "left bottom right top" or "[left, bottom, right, top]".',
)
bounds_opt_required = click.option(
"--bounds",
required=True,
default=None,
callback=bounds_handler,
help='Bounds: "left bottom right top" or "[left, bottom, right, top]".',
)
dst_crs_opt = click.option("--dst-crs", "--dst_crs", help="Destination CRS.")
@click.group()
def cli():
pass
@cli.command()
@click.option("--refresh", "-r", is_flag=True, help="Refresh the cached list")
def list(refresh):
"""List DataBC layers available via WFS
"""
# This works too, but is much slower:
# ogrinfo WFS:http://openmaps.gov.bc.ca/geo/ows?VERSION=1.1.0
for table in bcdata.list_tables(refresh):
click.echo(table)
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@indent_opt
# Options to pick out a single metadata item and print it as
# a string.
@click.option(
"--count", "meta_member", flag_value="count", help="Print the count of features."
)
@click.option(
"--name", "meta_member", flag_value="name", help="Print the datasource's name."
)
def info(dataset, indent, meta_member):
"""Print basic metadata about a DataBC WFS layer as JSON.
Optionally print a single metadata item as a string.
"""
table = bcdata.validate_name(dataset)
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
info = {}
info["name"] = table
info["count"] = bcdata.get_count(table)
info["schema"] = wfs.get_schema("pub:" + table)
if meta_member:
click.echo(info[meta_member])
else:
click.echo(json.dumps(info, indent=indent))
@cli.command()
@click.option("--out_file", "-o", help="Output file", default="dem25.tif")
@bounds_opt_required
@dst_crs_opt
@click.option("--src-crs", "--src_crs", help="CRS of provided bounds", default="EPSG:3005")
@click.option("--resolution", "-r", type=int, default=25)
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--query",
help="A valid CQL or ECQL query, quote enclosed (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@click.option("--out_file", "-o", help="Output file")
@bounds_opt
def dump(dataset, query, out_file, bounds):
"""Write DataBC features to stdout as GeoJSON feature collection.
\b
$ bcdata dump bc-airports
$ bcdata dump bc-airports --query "AIRPORT_NAME='Victoria Harbour (Shoal Point) Heliport'"
$ bcdata dump bc-airports --bounds xmin ymin xmax ymax
The values of --bounds must be in BC Albers.
It can also be combined to read bounds of a feature dataset using Fiona:
\b
$ bcdata dump bc-airports --bounds $(fio info aoi.shp --bounds)
"""
table = bcdata.validate_name(dataset)
data = bcdata.get_data(table, query=query, bounds=bounds)
if out_file:
with open(out_file, "w") as f:
json.dump(data.json(), f)
else:
sink = click.get_text_stream("stdout")
sink.write(json.dumps(data))
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--query",
help="A valid `CQL` or `ECQL` query (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@bounds_opt
@indent_opt
@compact_opt
@dst_crs_opt
@click.option(
"--pagesize", "-p", default=10000, help="Max number of records to request"
)
@click.option("--sortby", "-s", help="Name of sort field")
def cat(dataset, query, bounds, indent, compact, dst_crs, pagesize, sortby):
"""Write DataBC features to stdout as GeoJSON feature objects.
"""
# Note that cat does not concatenate!
dump_kwds = {"sort_keys": True}
if indent:
dump_kwds["indent"] = indent
if compact:
dump_kwds["separators"] = (",", ":")
table = bcdata.validate_name(dataset)
for feat in bcdata.get_features(
table, query=query, bounds=bounds, sortby=sortby, crs=dst_crs
):
click.echo(json.dumps(feat, **dump_kwds))
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--db_url",
"-db",
help="SQLAlchemy database url",
default=os.environ.get("DATABASE_URL"),
)
@click.option("--table", help="Destination table name")
@click.option("--schema", help="Destination schema name")
@click.option(
"--query",
help="A valid `CQL` or `ECQL` query (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@click.option("--append", is_flag=True, help="Append to existing table")
@click.option(
"--pagesize", "-p", default=10000, help="Max number of records to request"
)
@click.option("--sortby", "-s", help="Name of sort field")
@click.option(
"--max_workers", "-w", default=5, help="Max number of concurrent requests"
)
def bc2pg(dataset, db_url, table, schema, query, append, pagesize, sortby, max_workers):
"""Download a DataBC WFS layer to postgres - an ogr2ogr wrapper.
\b
$ bcdata bc2pg bc-airports --db_url postgresql://postgres:postgres@localhost:5432/postgis
The default target database can be specified by setting the $DATABASE_URL
environment variable.
https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls
"""
src = bcdata.validate_name(dataset)
src_schema, src_table = [i.lower() for i in src.split(".")]
if not schema:
schema = src_schema
if not table:
table = src_table
# create schema if it does not exist
conn = pgdata.connect(db_url)
if schema not in conn.schemas:
click.echo("Schema {} does not exist, creating it".format(schema))
conn.create_schema(schema)
# build parameters for each required request
param_dicts = bcdata.define_request(
dataset, query=query, sortby=sortby, pagesize=pagesize
)
try:
# run the first request / load
payload = urlencode(param_dicts[0], doseq=True)
url = bcdata.WFS_URL + "?" + payload
db = parse_db_url(db_url)
db_string = "PG:host={h} user={u} dbname={db} password={pwd}".format(
h=db["host"], u=db["user"], db=db["database"], pwd=db["password"]
)
# create the table
if not append:
command = [
"ogr2ogr",
"-lco",
"OVERWRITE=YES",
"-lco",
"SCHEMA={}".format(schema),
"-lco",
"GEOMETRY_NAME=geom",
"-f",
"PostgreSQL",
db_string,
"-t_srs",
"EPSG:3005",
"-nln",
table,
url,
]
click.echo(" ".join(command))
subprocess.run(command)
# append to table when append specified or processing many chunks
if len(param_dicts) > 1 or append:
# define starting index in list of requests
if append:
idx = 0
else:
idx = 1
commands = []
for chunk, paramdict in enumerate(param_dicts[idx:]):
payload = urlencode(paramdict, doseq=True)
url = bcdata.WFS_URL + "?" + payload
command = [
"ogr2ogr",
"-update",
"-append",
"-f",
"PostgreSQL",
db_string + " active_schema=" + schema,
"-t_srs",
"EPSG:3005",
"-nln",
table,
url,
]
commands.append(command)
# https://stackoverflow.com/questions/14533458
pool = Pool(max_workers)
with click.progressbar(
pool.imap(partial(call), commands), length=len(param_dicts)
) as bar:
for returncode in bar:
if returncode != 0:
click.echo("Command failed: {}".format(returncode))
click.echo(
"Load of {} to {} in {} complete".format(src, schema + "." + table, db_url)
)
except Exception:
click.echo("Data load failed")
raise click.Abort()
|
smnorris/bcdata
|
bcdata/cli.py
|
dump
|
python
|
def dump(dataset, query, out_file, bounds):
table = bcdata.validate_name(dataset)
data = bcdata.get_data(table, query=query, bounds=bounds)
if out_file:
with open(out_file, "w") as f:
json.dump(data.json(), f)
else:
sink = click.get_text_stream("stdout")
sink.write(json.dumps(data))
|
Write DataBC features to stdout as GeoJSON feature collection.
\b
$ bcdata dump bc-airports
$ bcdata dump bc-airports --query "AIRPORT_NAME='Victoria Harbour (Shoal Point) Heliport'"
$ bcdata dump bc-airports --bounds xmin ymin xmax ymax
The values of --bounds must be in BC Albers.
It can also be combined to read bounds of a feature dataset using Fiona:
\b
$ bcdata dump bc-airports --bounds $(fio info aoi.shp --bounds)
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/cli.py#L159-L181
|
[
"def get_data(\n dataset,\n query=None,\n crs=\"epsg:4326\",\n bounds=None,\n sortby=None,\n pagesize=10000,\n max_workers=5,\n):\n \"\"\"Get GeoJSON featurecollection from DataBC WFS\n \"\"\"\n param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n results = executor.map(make_request, param_dicts)\n\n outjson = dict(type=\"FeatureCollection\", features=[])\n for result in results:\n outjson[\"features\"] += result\n return outjson\n",
"def validate_name(dataset):\n \"\"\"Check wfs/cache and the bcdc api to see if dataset name is valid\n \"\"\"\n if dataset in list_tables():\n return dataset\n else:\n return bcdc_package_show(dataset)[\"object_name\"]\n"
] |
import json
import logging
import math
import os
import re
import subprocess
from urllib.parse import urlencode
from urllib.parse import urlparse
from functools import partial
from multiprocessing.dummy import Pool
from subprocess import call
import click
from cligj import indent_opt
from cligj import compact_opt
from owslib.wfs import WebFeatureService
import pgdata
import bcdata
bcdata.configure_logging()
log = logging.getLogger(__name__)
def parse_db_url(db_url):
"""provided a db url, return a dict with connection properties
"""
u = urlparse(db_url)
db = {}
db["database"] = u.path[1:]
db["user"] = u.username
db["password"] = u.password
db["host"] = u.hostname
db["port"] = u.port
return db
def get_objects(ctx, args, incomplete):
return [k for k in bcdata.list_tables() if incomplete in k]
# bounds handling direct from rasterio
# https://github.com/mapbox/rasterio/blob/master/rasterio/rio/options.py
# https://github.com/mapbox/rasterio/blob/master/rasterio/rio/clip.py
def from_like_context(ctx, param, value):
"""Return the value for an option from the context if the option
or `--all` is given, else return None."""
if ctx.obj and ctx.obj.get("like") and (value == "like" or ctx.obj.get("all_like")):
return ctx.obj["like"][param.name]
else:
return None
def bounds_handler(ctx, param, value):
"""Handle different forms of bounds."""
retval = from_like_context(ctx, param, value)
if retval is None and value is not None:
try:
value = value.strip(", []")
retval = tuple(float(x) for x in re.split(r"[,\s]+", value))
assert len(retval) == 4
return retval
except Exception:
raise click.BadParameter(
"{0!r} is not a valid bounding box representation".format(value)
)
else: # pragma: no cover
return retval
bounds_opt = click.option(
"--bounds",
default=None,
callback=bounds_handler,
help='Bounds: "left bottom right top" or "[left, bottom, right, top]".',
)
bounds_opt_required = click.option(
"--bounds",
required=True,
default=None,
callback=bounds_handler,
help='Bounds: "left bottom right top" or "[left, bottom, right, top]".',
)
dst_crs_opt = click.option("--dst-crs", "--dst_crs", help="Destination CRS.")
@click.group()
def cli():
pass
@cli.command()
@click.option("--refresh", "-r", is_flag=True, help="Refresh the cached list")
def list(refresh):
"""List DataBC layers available via WFS
"""
# This works too, but is much slower:
# ogrinfo WFS:http://openmaps.gov.bc.ca/geo/ows?VERSION=1.1.0
for table in bcdata.list_tables(refresh):
click.echo(table)
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@indent_opt
# Options to pick out a single metadata item and print it as
# a string.
@click.option(
"--count", "meta_member", flag_value="count", help="Print the count of features."
)
@click.option(
"--name", "meta_member", flag_value="name", help="Print the datasource's name."
)
def info(dataset, indent, meta_member):
"""Print basic metadata about a DataBC WFS layer as JSON.
Optionally print a single metadata item as a string.
"""
table = bcdata.validate_name(dataset)
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
info = {}
info["name"] = table
info["count"] = bcdata.get_count(table)
info["schema"] = wfs.get_schema("pub:" + table)
if meta_member:
click.echo(info[meta_member])
else:
click.echo(json.dumps(info, indent=indent))
@cli.command()
@click.option("--out_file", "-o", help="Output file", default="dem25.tif")
@bounds_opt_required
@dst_crs_opt
@click.option("--src-crs", "--src_crs", help="CRS of provided bounds", default="EPSG:3005")
@click.option("--resolution", "-r", type=int, default=25)
def dem(bounds, src_crs, dst_crs, out_file, resolution):
"""Dump BC DEM to TIFF
"""
if not dst_crs:
dst_crs = "EPSG:3005"
bcdata.get_dem(bounds, out_file=out_file, src_crs=src_crs, dst_crs=dst_crs, resolution=resolution)
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--query",
help="A valid CQL or ECQL query, quote enclosed (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@click.option("--out_file", "-o", help="Output file")
@bounds_opt
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--query",
help="A valid `CQL` or `ECQL` query (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@bounds_opt
@indent_opt
@compact_opt
@dst_crs_opt
@click.option(
"--pagesize", "-p", default=10000, help="Max number of records to request"
)
@click.option("--sortby", "-s", help="Name of sort field")
def cat(dataset, query, bounds, indent, compact, dst_crs, pagesize, sortby):
"""Write DataBC features to stdout as GeoJSON feature objects.
"""
# Note that cat does not concatenate!
dump_kwds = {"sort_keys": True}
if indent:
dump_kwds["indent"] = indent
if compact:
dump_kwds["separators"] = (",", ":")
table = bcdata.validate_name(dataset)
for feat in bcdata.get_features(
table, query=query, bounds=bounds, sortby=sortby, crs=dst_crs
):
click.echo(json.dumps(feat, **dump_kwds))
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--db_url",
"-db",
help="SQLAlchemy database url",
default=os.environ.get("DATABASE_URL"),
)
@click.option("--table", help="Destination table name")
@click.option("--schema", help="Destination schema name")
@click.option(
"--query",
help="A valid `CQL` or `ECQL` query (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@click.option("--append", is_flag=True, help="Append to existing table")
@click.option(
"--pagesize", "-p", default=10000, help="Max number of records to request"
)
@click.option("--sortby", "-s", help="Name of sort field")
@click.option(
"--max_workers", "-w", default=5, help="Max number of concurrent requests"
)
def bc2pg(dataset, db_url, table, schema, query, append, pagesize, sortby, max_workers):
"""Download a DataBC WFS layer to postgres - an ogr2ogr wrapper.
\b
$ bcdata bc2pg bc-airports --db_url postgresql://postgres:postgres@localhost:5432/postgis
The default target database can be specified by setting the $DATABASE_URL
environment variable.
https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls
"""
src = bcdata.validate_name(dataset)
src_schema, src_table = [i.lower() for i in src.split(".")]
if not schema:
schema = src_schema
if not table:
table = src_table
# create schema if it does not exist
conn = pgdata.connect(db_url)
if schema not in conn.schemas:
click.echo("Schema {} does not exist, creating it".format(schema))
conn.create_schema(schema)
# build parameters for each required request
param_dicts = bcdata.define_request(
dataset, query=query, sortby=sortby, pagesize=pagesize
)
try:
# run the first request / load
payload = urlencode(param_dicts[0], doseq=True)
url = bcdata.WFS_URL + "?" + payload
db = parse_db_url(db_url)
db_string = "PG:host={h} user={u} dbname={db} password={pwd}".format(
h=db["host"], u=db["user"], db=db["database"], pwd=db["password"]
)
# create the table
if not append:
command = [
"ogr2ogr",
"-lco",
"OVERWRITE=YES",
"-lco",
"SCHEMA={}".format(schema),
"-lco",
"GEOMETRY_NAME=geom",
"-f",
"PostgreSQL",
db_string,
"-t_srs",
"EPSG:3005",
"-nln",
table,
url,
]
click.echo(" ".join(command))
subprocess.run(command)
# append to table when append specified or processing many chunks
if len(param_dicts) > 1 or append:
# define starting index in list of requests
if append:
idx = 0
else:
idx = 1
commands = []
for chunk, paramdict in enumerate(param_dicts[idx:]):
payload = urlencode(paramdict, doseq=True)
url = bcdata.WFS_URL + "?" + payload
command = [
"ogr2ogr",
"-update",
"-append",
"-f",
"PostgreSQL",
db_string + " active_schema=" + schema,
"-t_srs",
"EPSG:3005",
"-nln",
table,
url,
]
commands.append(command)
# https://stackoverflow.com/questions/14533458
pool = Pool(max_workers)
with click.progressbar(
pool.imap(partial(call), commands), length=len(param_dicts)
) as bar:
for returncode in bar:
if returncode != 0:
click.echo("Command failed: {}".format(returncode))
click.echo(
"Load of {} to {} in {} complete".format(src, schema + "." + table, db_url)
)
except Exception:
click.echo("Data load failed")
raise click.Abort()
|
smnorris/bcdata
|
bcdata/cli.py
|
cat
|
python
|
def cat(dataset, query, bounds, indent, compact, dst_crs, pagesize, sortby):
# Note that cat does not concatenate!
dump_kwds = {"sort_keys": True}
if indent:
dump_kwds["indent"] = indent
if compact:
dump_kwds["separators"] = (",", ":")
table = bcdata.validate_name(dataset)
for feat in bcdata.get_features(
table, query=query, bounds=bounds, sortby=sortby, crs=dst_crs
):
click.echo(json.dumps(feat, **dump_kwds))
|
Write DataBC features to stdout as GeoJSON feature objects.
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/cli.py#L198-L211
|
[
"def get_features(\n dataset,\n query=None,\n crs=\"epsg:4326\",\n bounds=None,\n sortby=None,\n pagesize=10000,\n max_workers=5,\n):\n \"\"\"Yield features from DataBC WFS\n \"\"\"\n param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n for result in executor.map(make_request, param_dicts):\n for feature in result:\n yield feature\n",
"def validate_name(dataset):\n \"\"\"Check wfs/cache and the bcdc api to see if dataset name is valid\n \"\"\"\n if dataset in list_tables():\n return dataset\n else:\n return bcdc_package_show(dataset)[\"object_name\"]\n"
] |
import json
import logging
import math
import os
import re
import subprocess
from urllib.parse import urlencode
from urllib.parse import urlparse
from functools import partial
from multiprocessing.dummy import Pool
from subprocess import call
import click
from cligj import indent_opt
from cligj import compact_opt
from owslib.wfs import WebFeatureService
import pgdata
import bcdata
bcdata.configure_logging()
log = logging.getLogger(__name__)
def parse_db_url(db_url):
"""provided a db url, return a dict with connection properties
"""
u = urlparse(db_url)
db = {}
db["database"] = u.path[1:]
db["user"] = u.username
db["password"] = u.password
db["host"] = u.hostname
db["port"] = u.port
return db
def get_objects(ctx, args, incomplete):
return [k for k in bcdata.list_tables() if incomplete in k]
# bounds handling direct from rasterio
# https://github.com/mapbox/rasterio/blob/master/rasterio/rio/options.py
# https://github.com/mapbox/rasterio/blob/master/rasterio/rio/clip.py
def from_like_context(ctx, param, value):
"""Return the value for an option from the context if the option
or `--all` is given, else return None."""
if ctx.obj and ctx.obj.get("like") and (value == "like" or ctx.obj.get("all_like")):
return ctx.obj["like"][param.name]
else:
return None
def bounds_handler(ctx, param, value):
"""Handle different forms of bounds."""
retval = from_like_context(ctx, param, value)
if retval is None and value is not None:
try:
value = value.strip(", []")
retval = tuple(float(x) for x in re.split(r"[,\s]+", value))
assert len(retval) == 4
return retval
except Exception:
raise click.BadParameter(
"{0!r} is not a valid bounding box representation".format(value)
)
else: # pragma: no cover
return retval
bounds_opt = click.option(
"--bounds",
default=None,
callback=bounds_handler,
help='Bounds: "left bottom right top" or "[left, bottom, right, top]".',
)
bounds_opt_required = click.option(
"--bounds",
required=True,
default=None,
callback=bounds_handler,
help='Bounds: "left bottom right top" or "[left, bottom, right, top]".',
)
dst_crs_opt = click.option("--dst-crs", "--dst_crs", help="Destination CRS.")
@click.group()
def cli():
pass
@cli.command()
@click.option("--refresh", "-r", is_flag=True, help="Refresh the cached list")
def list(refresh):
"""List DataBC layers available via WFS
"""
# This works too, but is much slower:
# ogrinfo WFS:http://openmaps.gov.bc.ca/geo/ows?VERSION=1.1.0
for table in bcdata.list_tables(refresh):
click.echo(table)
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@indent_opt
# Options to pick out a single metadata item and print it as
# a string.
@click.option(
"--count", "meta_member", flag_value="count", help="Print the count of features."
)
@click.option(
"--name", "meta_member", flag_value="name", help="Print the datasource's name."
)
def info(dataset, indent, meta_member):
"""Print basic metadata about a DataBC WFS layer as JSON.
Optionally print a single metadata item as a string.
"""
table = bcdata.validate_name(dataset)
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
info = {}
info["name"] = table
info["count"] = bcdata.get_count(table)
info["schema"] = wfs.get_schema("pub:" + table)
if meta_member:
click.echo(info[meta_member])
else:
click.echo(json.dumps(info, indent=indent))
@cli.command()
@click.option("--out_file", "-o", help="Output file", default="dem25.tif")
@bounds_opt_required
@dst_crs_opt
@click.option("--src-crs", "--src_crs", help="CRS of provided bounds", default="EPSG:3005")
@click.option("--resolution", "-r", type=int, default=25)
def dem(bounds, src_crs, dst_crs, out_file, resolution):
"""Dump BC DEM to TIFF
"""
if not dst_crs:
dst_crs = "EPSG:3005"
bcdata.get_dem(bounds, out_file=out_file, src_crs=src_crs, dst_crs=dst_crs, resolution=resolution)
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--query",
help="A valid CQL or ECQL query, quote enclosed (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@click.option("--out_file", "-o", help="Output file")
@bounds_opt
def dump(dataset, query, out_file, bounds):
"""Write DataBC features to stdout as GeoJSON feature collection.
\b
$ bcdata dump bc-airports
$ bcdata dump bc-airports --query "AIRPORT_NAME='Victoria Harbour (Shoal Point) Heliport'"
$ bcdata dump bc-airports --bounds xmin ymin xmax ymax
The values of --bounds must be in BC Albers.
It can also be combined to read bounds of a feature dataset using Fiona:
\b
$ bcdata dump bc-airports --bounds $(fio info aoi.shp --bounds)
"""
table = bcdata.validate_name(dataset)
data = bcdata.get_data(table, query=query, bounds=bounds)
if out_file:
with open(out_file, "w") as f:
json.dump(data.json(), f)
else:
sink = click.get_text_stream("stdout")
sink.write(json.dumps(data))
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--query",
help="A valid `CQL` or `ECQL` query (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@bounds_opt
@indent_opt
@compact_opt
@dst_crs_opt
@click.option(
"--pagesize", "-p", default=10000, help="Max number of records to request"
)
@click.option("--sortby", "-s", help="Name of sort field")
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--db_url",
"-db",
help="SQLAlchemy database url",
default=os.environ.get("DATABASE_URL"),
)
@click.option("--table", help="Destination table name")
@click.option("--schema", help="Destination schema name")
@click.option(
"--query",
help="A valid `CQL` or `ECQL` query (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@click.option("--append", is_flag=True, help="Append to existing table")
@click.option(
"--pagesize", "-p", default=10000, help="Max number of records to request"
)
@click.option("--sortby", "-s", help="Name of sort field")
@click.option(
"--max_workers", "-w", default=5, help="Max number of concurrent requests"
)
def bc2pg(dataset, db_url, table, schema, query, append, pagesize, sortby, max_workers):
"""Download a DataBC WFS layer to postgres - an ogr2ogr wrapper.
\b
$ bcdata bc2pg bc-airports --db_url postgresql://postgres:postgres@localhost:5432/postgis
The default target database can be specified by setting the $DATABASE_URL
environment variable.
https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls
"""
src = bcdata.validate_name(dataset)
src_schema, src_table = [i.lower() for i in src.split(".")]
if not schema:
schema = src_schema
if not table:
table = src_table
# create schema if it does not exist
conn = pgdata.connect(db_url)
if schema not in conn.schemas:
click.echo("Schema {} does not exist, creating it".format(schema))
conn.create_schema(schema)
# build parameters for each required request
param_dicts = bcdata.define_request(
dataset, query=query, sortby=sortby, pagesize=pagesize
)
try:
# run the first request / load
payload = urlencode(param_dicts[0], doseq=True)
url = bcdata.WFS_URL + "?" + payload
db = parse_db_url(db_url)
db_string = "PG:host={h} user={u} dbname={db} password={pwd}".format(
h=db["host"], u=db["user"], db=db["database"], pwd=db["password"]
)
# create the table
if not append:
command = [
"ogr2ogr",
"-lco",
"OVERWRITE=YES",
"-lco",
"SCHEMA={}".format(schema),
"-lco",
"GEOMETRY_NAME=geom",
"-f",
"PostgreSQL",
db_string,
"-t_srs",
"EPSG:3005",
"-nln",
table,
url,
]
click.echo(" ".join(command))
subprocess.run(command)
# append to table when append specified or processing many chunks
if len(param_dicts) > 1 or append:
# define starting index in list of requests
if append:
idx = 0
else:
idx = 1
commands = []
for chunk, paramdict in enumerate(param_dicts[idx:]):
payload = urlencode(paramdict, doseq=True)
url = bcdata.WFS_URL + "?" + payload
command = [
"ogr2ogr",
"-update",
"-append",
"-f",
"PostgreSQL",
db_string + " active_schema=" + schema,
"-t_srs",
"EPSG:3005",
"-nln",
table,
url,
]
commands.append(command)
# https://stackoverflow.com/questions/14533458
pool = Pool(max_workers)
with click.progressbar(
pool.imap(partial(call), commands), length=len(param_dicts)
) as bar:
for returncode in bar:
if returncode != 0:
click.echo("Command failed: {}".format(returncode))
click.echo(
"Load of {} to {} in {} complete".format(src, schema + "." + table, db_url)
)
except Exception:
click.echo("Data load failed")
raise click.Abort()
|
smnorris/bcdata
|
bcdata/cli.py
|
bc2pg
|
python
|
def bc2pg(dataset, db_url, table, schema, query, append, pagesize, sortby, max_workers):
src = bcdata.validate_name(dataset)
src_schema, src_table = [i.lower() for i in src.split(".")]
if not schema:
schema = src_schema
if not table:
table = src_table
# create schema if it does not exist
conn = pgdata.connect(db_url)
if schema not in conn.schemas:
click.echo("Schema {} does not exist, creating it".format(schema))
conn.create_schema(schema)
# build parameters for each required request
param_dicts = bcdata.define_request(
dataset, query=query, sortby=sortby, pagesize=pagesize
)
try:
# run the first request / load
payload = urlencode(param_dicts[0], doseq=True)
url = bcdata.WFS_URL + "?" + payload
db = parse_db_url(db_url)
db_string = "PG:host={h} user={u} dbname={db} password={pwd}".format(
h=db["host"], u=db["user"], db=db["database"], pwd=db["password"]
)
# create the table
if not append:
command = [
"ogr2ogr",
"-lco",
"OVERWRITE=YES",
"-lco",
"SCHEMA={}".format(schema),
"-lco",
"GEOMETRY_NAME=geom",
"-f",
"PostgreSQL",
db_string,
"-t_srs",
"EPSG:3005",
"-nln",
table,
url,
]
click.echo(" ".join(command))
subprocess.run(command)
# append to table when append specified or processing many chunks
if len(param_dicts) > 1 or append:
# define starting index in list of requests
if append:
idx = 0
else:
idx = 1
commands = []
for chunk, paramdict in enumerate(param_dicts[idx:]):
payload = urlencode(paramdict, doseq=True)
url = bcdata.WFS_URL + "?" + payload
command = [
"ogr2ogr",
"-update",
"-append",
"-f",
"PostgreSQL",
db_string + " active_schema=" + schema,
"-t_srs",
"EPSG:3005",
"-nln",
table,
url,
]
commands.append(command)
# https://stackoverflow.com/questions/14533458
pool = Pool(max_workers)
with click.progressbar(
pool.imap(partial(call), commands), length=len(param_dicts)
) as bar:
for returncode in bar:
if returncode != 0:
click.echo("Command failed: {}".format(returncode))
click.echo(
"Load of {} to {} in {} complete".format(src, schema + "." + table, db_url)
)
except Exception:
click.echo("Data load failed")
raise click.Abort()
|
Download a DataBC WFS layer to postgres - an ogr2ogr wrapper.
\b
$ bcdata bc2pg bc-airports --db_url postgresql://postgres:postgres@localhost:5432/postgis
The default target database can be specified by setting the $DATABASE_URL
environment variable.
https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/cli.py#L235-L333
|
[
"def validate_name(dataset):\n \"\"\"Check wfs/cache and the bcdc api to see if dataset name is valid\n \"\"\"\n if dataset in list_tables():\n return dataset\n else:\n return bcdc_package_show(dataset)[\"object_name\"]\n",
"def define_request(\n dataset, query=None, crs=\"epsg:4326\", bounds=None, sortby=None, pagesize=10000\n):\n \"\"\"Define the getfeature request parameters required to download a dataset\n\n References:\n - http://www.opengeospatial.org/standards/wfs\n - http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html\n - http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html\n \"\"\"\n # validate the table name and find out how many features it holds\n table = validate_name(dataset)\n n = bcdata.get_count(table, query=query)\n\n # DataBC WFS getcapabilities says that it supports paging,\n # and the spec says that responses should include 'next URI'\n # (section 7.7.4.4.1)....\n # But I do not see any next uri in the responses. Instead of following\n # the paged urls, for datasets with >10k records, just generate urls\n # based on number of features in the dataset.\n chunks = math.ceil(n / pagesize)\n\n # if making several requests, we need to sort by something\n if chunks > 1 and not sortby:\n sortby = get_sortkey(table)\n\n # build the request parameters for each chunk\n param_dicts = []\n for i in range(chunks):\n request = {\n \"service\": \"WFS\",\n \"version\": \"2.0.0\",\n \"request\": \"GetFeature\",\n \"typeName\": table,\n \"outputFormat\": \"json\",\n \"SRSNAME\": crs,\n }\n if sortby:\n request[\"sortby\"] = sortby\n if query:\n request[\"CQL_FILTER\"] = query\n if bounds:\n request[\"bbox\"] = \",\".join([str(b) for b in bounds])\n if chunks > 1:\n request[\"startIndex\"] = i * pagesize\n request[\"count\"] = pagesize\n param_dicts.append(request)\n return param_dicts\n",
"def parse_db_url(db_url):\n \"\"\"provided a db url, return a dict with connection properties\n \"\"\"\n u = urlparse(db_url)\n db = {}\n db[\"database\"] = u.path[1:]\n db[\"user\"] = u.username\n db[\"password\"] = u.password\n db[\"host\"] = u.hostname\n db[\"port\"] = u.port\n return db\n"
] |
import json
import logging
import math
import os
import re
import subprocess
from urllib.parse import urlencode
from urllib.parse import urlparse
from functools import partial
from multiprocessing.dummy import Pool
from subprocess import call
import click
from cligj import indent_opt
from cligj import compact_opt
from owslib.wfs import WebFeatureService
import pgdata
import bcdata
bcdata.configure_logging()
log = logging.getLogger(__name__)
def parse_db_url(db_url):
"""provided a db url, return a dict with connection properties
"""
u = urlparse(db_url)
db = {}
db["database"] = u.path[1:]
db["user"] = u.username
db["password"] = u.password
db["host"] = u.hostname
db["port"] = u.port
return db
def get_objects(ctx, args, incomplete):
return [k for k in bcdata.list_tables() if incomplete in k]
# bounds handling direct from rasterio
# https://github.com/mapbox/rasterio/blob/master/rasterio/rio/options.py
# https://github.com/mapbox/rasterio/blob/master/rasterio/rio/clip.py
def from_like_context(ctx, param, value):
"""Return the value for an option from the context if the option
or `--all` is given, else return None."""
if ctx.obj and ctx.obj.get("like") and (value == "like" or ctx.obj.get("all_like")):
return ctx.obj["like"][param.name]
else:
return None
def bounds_handler(ctx, param, value):
"""Handle different forms of bounds."""
retval = from_like_context(ctx, param, value)
if retval is None and value is not None:
try:
value = value.strip(", []")
retval = tuple(float(x) for x in re.split(r"[,\s]+", value))
assert len(retval) == 4
return retval
except Exception:
raise click.BadParameter(
"{0!r} is not a valid bounding box representation".format(value)
)
else: # pragma: no cover
return retval
bounds_opt = click.option(
"--bounds",
default=None,
callback=bounds_handler,
help='Bounds: "left bottom right top" or "[left, bottom, right, top]".',
)
bounds_opt_required = click.option(
"--bounds",
required=True,
default=None,
callback=bounds_handler,
help='Bounds: "left bottom right top" or "[left, bottom, right, top]".',
)
dst_crs_opt = click.option("--dst-crs", "--dst_crs", help="Destination CRS.")
@click.group()
def cli():
pass
@cli.command()
@click.option("--refresh", "-r", is_flag=True, help="Refresh the cached list")
def list(refresh):
"""List DataBC layers available via WFS
"""
# This works too, but is much slower:
# ogrinfo WFS:http://openmaps.gov.bc.ca/geo/ows?VERSION=1.1.0
for table in bcdata.list_tables(refresh):
click.echo(table)
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@indent_opt
# Options to pick out a single metadata item and print it as
# a string.
@click.option(
"--count", "meta_member", flag_value="count", help="Print the count of features."
)
@click.option(
"--name", "meta_member", flag_value="name", help="Print the datasource's name."
)
def info(dataset, indent, meta_member):
"""Print basic metadata about a DataBC WFS layer as JSON.
Optionally print a single metadata item as a string.
"""
table = bcdata.validate_name(dataset)
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
info = {}
info["name"] = table
info["count"] = bcdata.get_count(table)
info["schema"] = wfs.get_schema("pub:" + table)
if meta_member:
click.echo(info[meta_member])
else:
click.echo(json.dumps(info, indent=indent))
@cli.command()
@click.option("--out_file", "-o", help="Output file", default="dem25.tif")
@bounds_opt_required
@dst_crs_opt
@click.option("--src-crs", "--src_crs", help="CRS of provided bounds", default="EPSG:3005")
@click.option("--resolution", "-r", type=int, default=25)
def dem(bounds, src_crs, dst_crs, out_file, resolution):
"""Dump BC DEM to TIFF
"""
if not dst_crs:
dst_crs = "EPSG:3005"
bcdata.get_dem(bounds, out_file=out_file, src_crs=src_crs, dst_crs=dst_crs, resolution=resolution)
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--query",
help="A valid CQL or ECQL query, quote enclosed (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@click.option("--out_file", "-o", help="Output file")
@bounds_opt
def dump(dataset, query, out_file, bounds):
"""Write DataBC features to stdout as GeoJSON feature collection.
\b
$ bcdata dump bc-airports
$ bcdata dump bc-airports --query "AIRPORT_NAME='Victoria Harbour (Shoal Point) Heliport'"
$ bcdata dump bc-airports --bounds xmin ymin xmax ymax
The values of --bounds must be in BC Albers.
It can also be combined to read bounds of a feature dataset using Fiona:
\b
$ bcdata dump bc-airports --bounds $(fio info aoi.shp --bounds)
"""
table = bcdata.validate_name(dataset)
data = bcdata.get_data(table, query=query, bounds=bounds)
if out_file:
with open(out_file, "w") as f:
json.dump(data.json(), f)
else:
sink = click.get_text_stream("stdout")
sink.write(json.dumps(data))
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--query",
help="A valid `CQL` or `ECQL` query (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@bounds_opt
@indent_opt
@compact_opt
@dst_crs_opt
@click.option(
"--pagesize", "-p", default=10000, help="Max number of records to request"
)
@click.option("--sortby", "-s", help="Name of sort field")
def cat(dataset, query, bounds, indent, compact, dst_crs, pagesize, sortby):
"""Write DataBC features to stdout as GeoJSON feature objects.
"""
# Note that cat does not concatenate!
dump_kwds = {"sort_keys": True}
if indent:
dump_kwds["indent"] = indent
if compact:
dump_kwds["separators"] = (",", ":")
table = bcdata.validate_name(dataset)
for feat in bcdata.get_features(
table, query=query, bounds=bounds, sortby=sortby, crs=dst_crs
):
click.echo(json.dumps(feat, **dump_kwds))
@cli.command()
@click.argument("dataset", type=click.STRING, autocompletion=get_objects)
@click.option(
"--db_url",
"-db",
help="SQLAlchemy database url",
default=os.environ.get("DATABASE_URL"),
)
@click.option("--table", help="Destination table name")
@click.option("--schema", help="Destination schema name")
@click.option(
"--query",
help="A valid `CQL` or `ECQL` query (https://docs.geoserver.org/stable/en/user/tutorials/cql/cql_tutorial.html)",
)
@click.option("--append", is_flag=True, help="Append to existing table")
@click.option(
"--pagesize", "-p", default=10000, help="Max number of records to request"
)
@click.option("--sortby", "-s", help="Name of sort field")
@click.option(
"--max_workers", "-w", default=5, help="Max number of concurrent requests"
)
|
smnorris/bcdata
|
bcdata/wcs.py
|
get_dem
|
python
|
def get_dem(bounds, out_file="dem.tif", src_crs="EPSG:3005", dst_crs="EPSG:3005", resolution=25):
bbox = ",".join([str(b) for b in bounds])
# todo: validate resolution units are equivalent to src_crs units
# build request
payload = {
"service": "WCS",
"version": "1.0.0",
"request": "GetCoverage",
"coverage": "pub:bc_elevation_25m_bcalb",
"Format": "GeoTIFF",
"bbox": bbox,
"CRS": src_crs,
"RESPONSE_CRS": dst_crs,
"resx": str(resolution),
"resy": str(resolution),
}
# request data from WCS
r = requests.get(bcdata.WCS_URL, params=payload)
# save to tiff
if r.status_code == 200:
with open(out_file, "wb") as file:
file.write(r.content)
return out_file
else:
raise RuntimeError(
"WCS request failed with status code {}".format(str(r.status_code))
)
|
Get 25m DEM for provided bounds, write to GeoTIFF
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wcs.py#L10-L38
| null |
import logging
import requests
import bcdata
log = logging.getLogger(__name__)
|
smnorris/bcdata
|
bcdata/wfs.py
|
get_sortkey
|
python
|
def get_sortkey(table):
# Just pick the first column in the table in alphabetical order.
# Ideally we would get the primary key from bcdc api, but it doesn't
# seem to be available
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
return sorted(wfs.get_schema("pub:" + table)["properties"].keys())[0]
|
Get a field to sort by
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L26-L33
| null |
from datetime import datetime
from datetime import timedelta
import json
import logging
import math
import os
from pathlib import Path
import sys
import warnings
import xml.etree.ElementTree as ET
from concurrent.futures import ThreadPoolExecutor
from owslib.wfs import WebFeatureService
import requests
import bcdata
if not sys.warnoptions:
warnings.simplefilter("ignore")
log = logging.getLogger(__name__)
def check_cache(path):
"""Return true if the cache file holding list of all datasets
does not exist or is older than 30 days
"""
if not os.path.exists(path):
return True
else:
# check the age
mod_date = datetime.fromtimestamp(os.path.getmtime(path))
if mod_date < (datetime.now() - timedelta(days=30)):
return True
else:
return False
def bcdc_package_show(package):
"""Query DataBC Catalogue API about given package
"""
params = {"id": package}
r = requests.get(bcdata.BCDC_API_URL + "package_show", params=params)
if r.status_code != 200:
raise ValueError("{d} is not present in DataBC API list".format(d=package))
return r.json()["result"]
def validate_name(dataset):
"""Check wfs/cache and the bcdc api to see if dataset name is valid
"""
if dataset in list_tables():
return dataset
else:
return bcdc_package_show(dataset)["object_name"]
def list_tables(refresh=False, cache_file=None):
"""Return a list of all datasets available via WFS
"""
# default cache listing all objects available is
# ~/.bcdata
if not cache_file:
cache_file = os.path.join(str(Path.home()), ".bcdata")
# regenerate the cache if:
# - the cache file doesn't exist
# - we force a refresh
# - the cache is older than 1 month
if refresh or check_cache(cache_file):
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
bcdata_objects = [i.strip("pub:") for i in list(wfs.contents)]
with open(cache_file, "w") as outfile:
json.dump(sorted(bcdata_objects), outfile)
else:
with open(cache_file, "r") as infile:
bcdata_objects = json.load(infile)
return bcdata_objects
def get_count(dataset, query=None):
"""Ask DataBC WFS how many features there are in a table/query
"""
# https://gis.stackexchange.com/questions/45101/only-return-the-numberoffeatures-in-a-wfs-query
table = validate_name(dataset)
payload = {
"service": "WFS",
"version": "2.0.0",
"request": "GetFeature",
"typeName": table,
"resultType": "hits",
"outputFormat": "json",
}
if query:
payload["CQL_FILTER"] = query
r = requests.get(bcdata.WFS_URL, params=payload)
return int(ET.fromstring(r.text).attrib["numberMatched"])
def make_request(parameters):
"""Submit a getfeature request to DataBC WFS and return features
"""
r = requests.get(bcdata.WFS_URL, params=parameters)
return r.json()["features"]
def define_request(
dataset, query=None, crs="epsg:4326", bounds=None, sortby=None, pagesize=10000
):
"""Define the getfeature request parameters required to download a dataset
References:
- http://www.opengeospatial.org/standards/wfs
- http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html
- http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html
"""
# validate the table name and find out how many features it holds
table = validate_name(dataset)
n = bcdata.get_count(table, query=query)
# DataBC WFS getcapabilities says that it supports paging,
# and the spec says that responses should include 'next URI'
# (section 7.7.4.4.1)....
# But I do not see any next uri in the responses. Instead of following
# the paged urls, for datasets with >10k records, just generate urls
# based on number of features in the dataset.
chunks = math.ceil(n / pagesize)
# if making several requests, we need to sort by something
if chunks > 1 and not sortby:
sortby = get_sortkey(table)
# build the request parameters for each chunk
param_dicts = []
for i in range(chunks):
request = {
"service": "WFS",
"version": "2.0.0",
"request": "GetFeature",
"typeName": table,
"outputFormat": "json",
"SRSNAME": crs,
}
if sortby:
request["sortby"] = sortby
if query:
request["CQL_FILTER"] = query
if bounds:
request["bbox"] = ",".join([str(b) for b in bounds])
if chunks > 1:
request["startIndex"] = i * pagesize
request["count"] = pagesize
param_dicts.append(request)
return param_dicts
def get_data(
dataset,
query=None,
crs="epsg:4326",
bounds=None,
sortby=None,
pagesize=10000,
max_workers=5,
):
"""Get GeoJSON featurecollection from DataBC WFS
"""
param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
results = executor.map(make_request, param_dicts)
outjson = dict(type="FeatureCollection", features=[])
for result in results:
outjson["features"] += result
return outjson
def get_features(
dataset,
query=None,
crs="epsg:4326",
bounds=None,
sortby=None,
pagesize=10000,
max_workers=5,
):
"""Yield features from DataBC WFS
"""
param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
for result in executor.map(make_request, param_dicts):
for feature in result:
yield feature
|
smnorris/bcdata
|
bcdata/wfs.py
|
check_cache
|
python
|
def check_cache(path):
if not os.path.exists(path):
return True
else:
# check the age
mod_date = datetime.fromtimestamp(os.path.getmtime(path))
if mod_date < (datetime.now() - timedelta(days=30)):
return True
else:
return False
|
Return true if the cache file holding list of all datasets
does not exist or is older than 30 days
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L36-L48
| null |
from datetime import datetime
from datetime import timedelta
import json
import logging
import math
import os
from pathlib import Path
import sys
import warnings
import xml.etree.ElementTree as ET
from concurrent.futures import ThreadPoolExecutor
from owslib.wfs import WebFeatureService
import requests
import bcdata
if not sys.warnoptions:
warnings.simplefilter("ignore")
log = logging.getLogger(__name__)
def get_sortkey(table):
"""Get a field to sort by
"""
# Just pick the first column in the table in alphabetical order.
# Ideally we would get the primary key from bcdc api, but it doesn't
# seem to be available
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
return sorted(wfs.get_schema("pub:" + table)["properties"].keys())[0]
def bcdc_package_show(package):
"""Query DataBC Catalogue API about given package
"""
params = {"id": package}
r = requests.get(bcdata.BCDC_API_URL + "package_show", params=params)
if r.status_code != 200:
raise ValueError("{d} is not present in DataBC API list".format(d=package))
return r.json()["result"]
def validate_name(dataset):
"""Check wfs/cache and the bcdc api to see if dataset name is valid
"""
if dataset in list_tables():
return dataset
else:
return bcdc_package_show(dataset)["object_name"]
def list_tables(refresh=False, cache_file=None):
"""Return a list of all datasets available via WFS
"""
# default cache listing all objects available is
# ~/.bcdata
if not cache_file:
cache_file = os.path.join(str(Path.home()), ".bcdata")
# regenerate the cache if:
# - the cache file doesn't exist
# - we force a refresh
# - the cache is older than 1 month
if refresh or check_cache(cache_file):
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
bcdata_objects = [i.strip("pub:") for i in list(wfs.contents)]
with open(cache_file, "w") as outfile:
json.dump(sorted(bcdata_objects), outfile)
else:
with open(cache_file, "r") as infile:
bcdata_objects = json.load(infile)
return bcdata_objects
def get_count(dataset, query=None):
"""Ask DataBC WFS how many features there are in a table/query
"""
# https://gis.stackexchange.com/questions/45101/only-return-the-numberoffeatures-in-a-wfs-query
table = validate_name(dataset)
payload = {
"service": "WFS",
"version": "2.0.0",
"request": "GetFeature",
"typeName": table,
"resultType": "hits",
"outputFormat": "json",
}
if query:
payload["CQL_FILTER"] = query
r = requests.get(bcdata.WFS_URL, params=payload)
return int(ET.fromstring(r.text).attrib["numberMatched"])
def make_request(parameters):
"""Submit a getfeature request to DataBC WFS and return features
"""
r = requests.get(bcdata.WFS_URL, params=parameters)
return r.json()["features"]
def define_request(
dataset, query=None, crs="epsg:4326", bounds=None, sortby=None, pagesize=10000
):
"""Define the getfeature request parameters required to download a dataset
References:
- http://www.opengeospatial.org/standards/wfs
- http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html
- http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html
"""
# validate the table name and find out how many features it holds
table = validate_name(dataset)
n = bcdata.get_count(table, query=query)
# DataBC WFS getcapabilities says that it supports paging,
# and the spec says that responses should include 'next URI'
# (section 7.7.4.4.1)....
# But I do not see any next uri in the responses. Instead of following
# the paged urls, for datasets with >10k records, just generate urls
# based on number of features in the dataset.
chunks = math.ceil(n / pagesize)
# if making several requests, we need to sort by something
if chunks > 1 and not sortby:
sortby = get_sortkey(table)
# build the request parameters for each chunk
param_dicts = []
for i in range(chunks):
request = {
"service": "WFS",
"version": "2.0.0",
"request": "GetFeature",
"typeName": table,
"outputFormat": "json",
"SRSNAME": crs,
}
if sortby:
request["sortby"] = sortby
if query:
request["CQL_FILTER"] = query
if bounds:
request["bbox"] = ",".join([str(b) for b in bounds])
if chunks > 1:
request["startIndex"] = i * pagesize
request["count"] = pagesize
param_dicts.append(request)
return param_dicts
def get_data(
dataset,
query=None,
crs="epsg:4326",
bounds=None,
sortby=None,
pagesize=10000,
max_workers=5,
):
"""Get GeoJSON featurecollection from DataBC WFS
"""
param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
results = executor.map(make_request, param_dicts)
outjson = dict(type="FeatureCollection", features=[])
for result in results:
outjson["features"] += result
return outjson
def get_features(
dataset,
query=None,
crs="epsg:4326",
bounds=None,
sortby=None,
pagesize=10000,
max_workers=5,
):
"""Yield features from DataBC WFS
"""
param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
for result in executor.map(make_request, param_dicts):
for feature in result:
yield feature
|
smnorris/bcdata
|
bcdata/wfs.py
|
bcdc_package_show
|
python
|
def bcdc_package_show(package):
params = {"id": package}
r = requests.get(bcdata.BCDC_API_URL + "package_show", params=params)
if r.status_code != 200:
raise ValueError("{d} is not present in DataBC API list".format(d=package))
return r.json()["result"]
|
Query DataBC Catalogue API about given package
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L51-L58
| null |
from datetime import datetime
from datetime import timedelta
import json
import logging
import math
import os
from pathlib import Path
import sys
import warnings
import xml.etree.ElementTree as ET
from concurrent.futures import ThreadPoolExecutor
from owslib.wfs import WebFeatureService
import requests
import bcdata
if not sys.warnoptions:
warnings.simplefilter("ignore")
log = logging.getLogger(__name__)
def get_sortkey(table):
"""Get a field to sort by
"""
# Just pick the first column in the table in alphabetical order.
# Ideally we would get the primary key from bcdc api, but it doesn't
# seem to be available
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
return sorted(wfs.get_schema("pub:" + table)["properties"].keys())[0]
def check_cache(path):
"""Return true if the cache file holding list of all datasets
does not exist or is older than 30 days
"""
if not os.path.exists(path):
return True
else:
# check the age
mod_date = datetime.fromtimestamp(os.path.getmtime(path))
if mod_date < (datetime.now() - timedelta(days=30)):
return True
else:
return False
def validate_name(dataset):
"""Check wfs/cache and the bcdc api to see if dataset name is valid
"""
if dataset in list_tables():
return dataset
else:
return bcdc_package_show(dataset)["object_name"]
def list_tables(refresh=False, cache_file=None):
"""Return a list of all datasets available via WFS
"""
# default cache listing all objects available is
# ~/.bcdata
if not cache_file:
cache_file = os.path.join(str(Path.home()), ".bcdata")
# regenerate the cache if:
# - the cache file doesn't exist
# - we force a refresh
# - the cache is older than 1 month
if refresh or check_cache(cache_file):
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
bcdata_objects = [i.strip("pub:") for i in list(wfs.contents)]
with open(cache_file, "w") as outfile:
json.dump(sorted(bcdata_objects), outfile)
else:
with open(cache_file, "r") as infile:
bcdata_objects = json.load(infile)
return bcdata_objects
def get_count(dataset, query=None):
"""Ask DataBC WFS how many features there are in a table/query
"""
# https://gis.stackexchange.com/questions/45101/only-return-the-numberoffeatures-in-a-wfs-query
table = validate_name(dataset)
payload = {
"service": "WFS",
"version": "2.0.0",
"request": "GetFeature",
"typeName": table,
"resultType": "hits",
"outputFormat": "json",
}
if query:
payload["CQL_FILTER"] = query
r = requests.get(bcdata.WFS_URL, params=payload)
return int(ET.fromstring(r.text).attrib["numberMatched"])
def make_request(parameters):
"""Submit a getfeature request to DataBC WFS and return features
"""
r = requests.get(bcdata.WFS_URL, params=parameters)
return r.json()["features"]
def define_request(
dataset, query=None, crs="epsg:4326", bounds=None, sortby=None, pagesize=10000
):
"""Define the getfeature request parameters required to download a dataset
References:
- http://www.opengeospatial.org/standards/wfs
- http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html
- http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html
"""
# validate the table name and find out how many features it holds
table = validate_name(dataset)
n = bcdata.get_count(table, query=query)
# DataBC WFS getcapabilities says that it supports paging,
# and the spec says that responses should include 'next URI'
# (section 7.7.4.4.1)....
# But I do not see any next uri in the responses. Instead of following
# the paged urls, for datasets with >10k records, just generate urls
# based on number of features in the dataset.
chunks = math.ceil(n / pagesize)
# if making several requests, we need to sort by something
if chunks > 1 and not sortby:
sortby = get_sortkey(table)
# build the request parameters for each chunk
param_dicts = []
for i in range(chunks):
request = {
"service": "WFS",
"version": "2.0.0",
"request": "GetFeature",
"typeName": table,
"outputFormat": "json",
"SRSNAME": crs,
}
if sortby:
request["sortby"] = sortby
if query:
request["CQL_FILTER"] = query
if bounds:
request["bbox"] = ",".join([str(b) for b in bounds])
if chunks > 1:
request["startIndex"] = i * pagesize
request["count"] = pagesize
param_dicts.append(request)
return param_dicts
def get_data(
dataset,
query=None,
crs="epsg:4326",
bounds=None,
sortby=None,
pagesize=10000,
max_workers=5,
):
"""Get GeoJSON featurecollection from DataBC WFS
"""
param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
results = executor.map(make_request, param_dicts)
outjson = dict(type="FeatureCollection", features=[])
for result in results:
outjson["features"] += result
return outjson
def get_features(
dataset,
query=None,
crs="epsg:4326",
bounds=None,
sortby=None,
pagesize=10000,
max_workers=5,
):
"""Yield features from DataBC WFS
"""
param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
for result in executor.map(make_request, param_dicts):
for feature in result:
yield feature
|
smnorris/bcdata
|
bcdata/wfs.py
|
list_tables
|
python
|
def list_tables(refresh=False, cache_file=None):
# default cache listing all objects available is
# ~/.bcdata
if not cache_file:
cache_file = os.path.join(str(Path.home()), ".bcdata")
# regenerate the cache if:
# - the cache file doesn't exist
# - we force a refresh
# - the cache is older than 1 month
if refresh or check_cache(cache_file):
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
bcdata_objects = [i.strip("pub:") for i in list(wfs.contents)]
with open(cache_file, "w") as outfile:
json.dump(sorted(bcdata_objects), outfile)
else:
with open(cache_file, "r") as infile:
bcdata_objects = json.load(infile)
return bcdata_objects
|
Return a list of all datasets available via WFS
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L70-L91
|
[
"def check_cache(path):\n \"\"\"Return true if the cache file holding list of all datasets\n does not exist or is older than 30 days\n \"\"\"\n if not os.path.exists(path):\n return True\n else:\n # check the age\n mod_date = datetime.fromtimestamp(os.path.getmtime(path))\n if mod_date < (datetime.now() - timedelta(days=30)):\n return True\n else:\n return False\n"
] |
from datetime import datetime
from datetime import timedelta
import json
import logging
import math
import os
from pathlib import Path
import sys
import warnings
import xml.etree.ElementTree as ET
from concurrent.futures import ThreadPoolExecutor
from owslib.wfs import WebFeatureService
import requests
import bcdata
if not sys.warnoptions:
warnings.simplefilter("ignore")
log = logging.getLogger(__name__)
def get_sortkey(table):
"""Get a field to sort by
"""
# Just pick the first column in the table in alphabetical order.
# Ideally we would get the primary key from bcdc api, but it doesn't
# seem to be available
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
return sorted(wfs.get_schema("pub:" + table)["properties"].keys())[0]
def check_cache(path):
"""Return true if the cache file holding list of all datasets
does not exist or is older than 30 days
"""
if not os.path.exists(path):
return True
else:
# check the age
mod_date = datetime.fromtimestamp(os.path.getmtime(path))
if mod_date < (datetime.now() - timedelta(days=30)):
return True
else:
return False
def bcdc_package_show(package):
"""Query DataBC Catalogue API about given package
"""
params = {"id": package}
r = requests.get(bcdata.BCDC_API_URL + "package_show", params=params)
if r.status_code != 200:
raise ValueError("{d} is not present in DataBC API list".format(d=package))
return r.json()["result"]
def validate_name(dataset):
"""Check wfs/cache and the bcdc api to see if dataset name is valid
"""
if dataset in list_tables():
return dataset
else:
return bcdc_package_show(dataset)["object_name"]
def get_count(dataset, query=None):
"""Ask DataBC WFS how many features there are in a table/query
"""
# https://gis.stackexchange.com/questions/45101/only-return-the-numberoffeatures-in-a-wfs-query
table = validate_name(dataset)
payload = {
"service": "WFS",
"version": "2.0.0",
"request": "GetFeature",
"typeName": table,
"resultType": "hits",
"outputFormat": "json",
}
if query:
payload["CQL_FILTER"] = query
r = requests.get(bcdata.WFS_URL, params=payload)
return int(ET.fromstring(r.text).attrib["numberMatched"])
def make_request(parameters):
"""Submit a getfeature request to DataBC WFS and return features
"""
r = requests.get(bcdata.WFS_URL, params=parameters)
return r.json()["features"]
def define_request(
dataset, query=None, crs="epsg:4326", bounds=None, sortby=None, pagesize=10000
):
"""Define the getfeature request parameters required to download a dataset
References:
- http://www.opengeospatial.org/standards/wfs
- http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html
- http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html
"""
# validate the table name and find out how many features it holds
table = validate_name(dataset)
n = bcdata.get_count(table, query=query)
# DataBC WFS getcapabilities says that it supports paging,
# and the spec says that responses should include 'next URI'
# (section 7.7.4.4.1)....
# But I do not see any next uri in the responses. Instead of following
# the paged urls, for datasets with >10k records, just generate urls
# based on number of features in the dataset.
chunks = math.ceil(n / pagesize)
# if making several requests, we need to sort by something
if chunks > 1 and not sortby:
sortby = get_sortkey(table)
# build the request parameters for each chunk
param_dicts = []
for i in range(chunks):
request = {
"service": "WFS",
"version": "2.0.0",
"request": "GetFeature",
"typeName": table,
"outputFormat": "json",
"SRSNAME": crs,
}
if sortby:
request["sortby"] = sortby
if query:
request["CQL_FILTER"] = query
if bounds:
request["bbox"] = ",".join([str(b) for b in bounds])
if chunks > 1:
request["startIndex"] = i * pagesize
request["count"] = pagesize
param_dicts.append(request)
return param_dicts
def get_data(
dataset,
query=None,
crs="epsg:4326",
bounds=None,
sortby=None,
pagesize=10000,
max_workers=5,
):
"""Get GeoJSON featurecollection from DataBC WFS
"""
param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
results = executor.map(make_request, param_dicts)
outjson = dict(type="FeatureCollection", features=[])
for result in results:
outjson["features"] += result
return outjson
def get_features(
dataset,
query=None,
crs="epsg:4326",
bounds=None,
sortby=None,
pagesize=10000,
max_workers=5,
):
"""Yield features from DataBC WFS
"""
param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
for result in executor.map(make_request, param_dicts):
for feature in result:
yield feature
|
smnorris/bcdata
|
bcdata/wfs.py
|
get_count
|
python
|
def get_count(dataset, query=None):
# https://gis.stackexchange.com/questions/45101/only-return-the-numberoffeatures-in-a-wfs-query
table = validate_name(dataset)
payload = {
"service": "WFS",
"version": "2.0.0",
"request": "GetFeature",
"typeName": table,
"resultType": "hits",
"outputFormat": "json",
}
if query:
payload["CQL_FILTER"] = query
r = requests.get(bcdata.WFS_URL, params=payload)
return int(ET.fromstring(r.text).attrib["numberMatched"])
|
Ask DataBC WFS how many features there are in a table/query
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L94-L110
|
[
"def validate_name(dataset):\n \"\"\"Check wfs/cache and the bcdc api to see if dataset name is valid\n \"\"\"\n if dataset in list_tables():\n return dataset\n else:\n return bcdc_package_show(dataset)[\"object_name\"]\n"
] |
from datetime import datetime
from datetime import timedelta
import json
import logging
import math
import os
from pathlib import Path
import sys
import warnings
import xml.etree.ElementTree as ET
from concurrent.futures import ThreadPoolExecutor
from owslib.wfs import WebFeatureService
import requests
import bcdata
if not sys.warnoptions:
warnings.simplefilter("ignore")
log = logging.getLogger(__name__)
def get_sortkey(table):
"""Get a field to sort by
"""
# Just pick the first column in the table in alphabetical order.
# Ideally we would get the primary key from bcdc api, but it doesn't
# seem to be available
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
return sorted(wfs.get_schema("pub:" + table)["properties"].keys())[0]
def check_cache(path):
"""Return true if the cache file holding list of all datasets
does not exist or is older than 30 days
"""
if not os.path.exists(path):
return True
else:
# check the age
mod_date = datetime.fromtimestamp(os.path.getmtime(path))
if mod_date < (datetime.now() - timedelta(days=30)):
return True
else:
return False
def bcdc_package_show(package):
"""Query DataBC Catalogue API about given package
"""
params = {"id": package}
r = requests.get(bcdata.BCDC_API_URL + "package_show", params=params)
if r.status_code != 200:
raise ValueError("{d} is not present in DataBC API list".format(d=package))
return r.json()["result"]
def validate_name(dataset):
"""Check wfs/cache and the bcdc api to see if dataset name is valid
"""
if dataset in list_tables():
return dataset
else:
return bcdc_package_show(dataset)["object_name"]
def list_tables(refresh=False, cache_file=None):
"""Return a list of all datasets available via WFS
"""
# default cache listing all objects available is
# ~/.bcdata
if not cache_file:
cache_file = os.path.join(str(Path.home()), ".bcdata")
# regenerate the cache if:
# - the cache file doesn't exist
# - we force a refresh
# - the cache is older than 1 month
if refresh or check_cache(cache_file):
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
bcdata_objects = [i.strip("pub:") for i in list(wfs.contents)]
with open(cache_file, "w") as outfile:
json.dump(sorted(bcdata_objects), outfile)
else:
with open(cache_file, "r") as infile:
bcdata_objects = json.load(infile)
return bcdata_objects
def make_request(parameters):
"""Submit a getfeature request to DataBC WFS and return features
"""
r = requests.get(bcdata.WFS_URL, params=parameters)
return r.json()["features"]
def define_request(
dataset, query=None, crs="epsg:4326", bounds=None, sortby=None, pagesize=10000
):
"""Define the getfeature request parameters required to download a dataset
References:
- http://www.opengeospatial.org/standards/wfs
- http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html
- http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html
"""
# validate the table name and find out how many features it holds
table = validate_name(dataset)
n = bcdata.get_count(table, query=query)
# DataBC WFS getcapabilities says that it supports paging,
# and the spec says that responses should include 'next URI'
# (section 7.7.4.4.1)....
# But I do not see any next uri in the responses. Instead of following
# the paged urls, for datasets with >10k records, just generate urls
# based on number of features in the dataset.
chunks = math.ceil(n / pagesize)
# if making several requests, we need to sort by something
if chunks > 1 and not sortby:
sortby = get_sortkey(table)
# build the request parameters for each chunk
param_dicts = []
for i in range(chunks):
request = {
"service": "WFS",
"version": "2.0.0",
"request": "GetFeature",
"typeName": table,
"outputFormat": "json",
"SRSNAME": crs,
}
if sortby:
request["sortby"] = sortby
if query:
request["CQL_FILTER"] = query
if bounds:
request["bbox"] = ",".join([str(b) for b in bounds])
if chunks > 1:
request["startIndex"] = i * pagesize
request["count"] = pagesize
param_dicts.append(request)
return param_dicts
def get_data(
dataset,
query=None,
crs="epsg:4326",
bounds=None,
sortby=None,
pagesize=10000,
max_workers=5,
):
"""Get GeoJSON featurecollection from DataBC WFS
"""
param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
results = executor.map(make_request, param_dicts)
outjson = dict(type="FeatureCollection", features=[])
for result in results:
outjson["features"] += result
return outjson
def get_features(
dataset,
query=None,
crs="epsg:4326",
bounds=None,
sortby=None,
pagesize=10000,
max_workers=5,
):
"""Yield features from DataBC WFS
"""
param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
for result in executor.map(make_request, param_dicts):
for feature in result:
yield feature
|
smnorris/bcdata
|
bcdata/wfs.py
|
make_request
|
python
|
def make_request(parameters):
r = requests.get(bcdata.WFS_URL, params=parameters)
return r.json()["features"]
|
Submit a getfeature request to DataBC WFS and return features
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L113-L117
| null |
from datetime import datetime
from datetime import timedelta
import json
import logging
import math
import os
from pathlib import Path
import sys
import warnings
import xml.etree.ElementTree as ET
from concurrent.futures import ThreadPoolExecutor
from owslib.wfs import WebFeatureService
import requests
import bcdata
if not sys.warnoptions:
warnings.simplefilter("ignore")
log = logging.getLogger(__name__)
def get_sortkey(table):
"""Get a field to sort by
"""
# Just pick the first column in the table in alphabetical order.
# Ideally we would get the primary key from bcdc api, but it doesn't
# seem to be available
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
return sorted(wfs.get_schema("pub:" + table)["properties"].keys())[0]
def check_cache(path):
"""Return true if the cache file holding list of all datasets
does not exist or is older than 30 days
"""
if not os.path.exists(path):
return True
else:
# check the age
mod_date = datetime.fromtimestamp(os.path.getmtime(path))
if mod_date < (datetime.now() - timedelta(days=30)):
return True
else:
return False
def bcdc_package_show(package):
"""Query DataBC Catalogue API about given package
"""
params = {"id": package}
r = requests.get(bcdata.BCDC_API_URL + "package_show", params=params)
if r.status_code != 200:
raise ValueError("{d} is not present in DataBC API list".format(d=package))
return r.json()["result"]
def validate_name(dataset):
"""Check wfs/cache and the bcdc api to see if dataset name is valid
"""
if dataset in list_tables():
return dataset
else:
return bcdc_package_show(dataset)["object_name"]
def list_tables(refresh=False, cache_file=None):
"""Return a list of all datasets available via WFS
"""
# default cache listing all objects available is
# ~/.bcdata
if not cache_file:
cache_file = os.path.join(str(Path.home()), ".bcdata")
# regenerate the cache if:
# - the cache file doesn't exist
# - we force a refresh
# - the cache is older than 1 month
if refresh or check_cache(cache_file):
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
bcdata_objects = [i.strip("pub:") for i in list(wfs.contents)]
with open(cache_file, "w") as outfile:
json.dump(sorted(bcdata_objects), outfile)
else:
with open(cache_file, "r") as infile:
bcdata_objects = json.load(infile)
return bcdata_objects
def get_count(dataset, query=None):
"""Ask DataBC WFS how many features there are in a table/query
"""
# https://gis.stackexchange.com/questions/45101/only-return-the-numberoffeatures-in-a-wfs-query
table = validate_name(dataset)
payload = {
"service": "WFS",
"version": "2.0.0",
"request": "GetFeature",
"typeName": table,
"resultType": "hits",
"outputFormat": "json",
}
if query:
payload["CQL_FILTER"] = query
r = requests.get(bcdata.WFS_URL, params=payload)
return int(ET.fromstring(r.text).attrib["numberMatched"])
def define_request(
    dataset, query=None, crs="epsg:4326", bounds=None, sortby=None, pagesize=10000
):
    """Define the getfeature request parameters required to download a dataset

    References:
    - http://www.opengeospatial.org/standards/wfs
    - http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html
    - http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html
    """
    # validate the table name and find out how many features it holds
    table = validate_name(dataset)
    n_features = bcdata.get_count(table, query=query)

    # The server advertises WFS paging, and the spec says responses should
    # include a 'next URI' (section 7.7.4.4.1), but none is present in the
    # responses — so page boundaries are computed here from the feature
    # count instead of following server-provided links.
    n_pages = math.ceil(n_features / pagesize)

    # paged requests are only deterministic when sorted by something
    if n_pages > 1 and not sortby:
        sortby = get_sortkey(table)

    # build one parameter dict per page
    param_dicts = []
    for page in range(n_pages):
        params = {
            "service": "WFS",
            "version": "2.0.0",
            "request": "GetFeature",
            "typeName": table,
            "outputFormat": "json",
            "SRSNAME": crs,
        }
        if sortby:
            params["sortby"] = sortby
        if query:
            params["CQL_FILTER"] = query
        if bounds:
            params["bbox"] = ",".join(str(b) for b in bounds)
        if n_pages > 1:
            params["startIndex"] = page * pagesize
            params["count"] = pagesize
        param_dicts.append(params)
    return param_dicts
def get_data(
dataset,
query=None,
crs="epsg:4326",
bounds=None,
sortby=None,
pagesize=10000,
max_workers=5,
):
"""Get GeoJSON featurecollection from DataBC WFS
"""
param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
results = executor.map(make_request, param_dicts)
outjson = dict(type="FeatureCollection", features=[])
for result in results:
outjson["features"] += result
return outjson
def get_features(
dataset,
query=None,
crs="epsg:4326",
bounds=None,
sortby=None,
pagesize=10000,
max_workers=5,
):
"""Yield features from DataBC WFS
"""
param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
for result in executor.map(make_request, param_dicts):
for feature in result:
yield feature
|
smnorris/bcdata
|
bcdata/wfs.py
|
define_request
|
python
|
def define_request(
dataset, query=None, crs="epsg:4326", bounds=None, sortby=None, pagesize=10000
):
# validate the table name and find out how many features it holds
table = validate_name(dataset)
n = bcdata.get_count(table, query=query)
# DataBC WFS getcapabilities says that it supports paging,
# and the spec says that responses should include 'next URI'
# (section 7.7.4.4.1)....
# But I do not see any next uri in the responses. Instead of following
# the paged urls, for datasets with >10k records, just generate urls
# based on number of features in the dataset.
chunks = math.ceil(n / pagesize)
# if making several requests, we need to sort by something
if chunks > 1 and not sortby:
sortby = get_sortkey(table)
# build the request parameters for each chunk
param_dicts = []
for i in range(chunks):
request = {
"service": "WFS",
"version": "2.0.0",
"request": "GetFeature",
"typeName": table,
"outputFormat": "json",
"SRSNAME": crs,
}
if sortby:
request["sortby"] = sortby
if query:
request["CQL_FILTER"] = query
if bounds:
request["bbox"] = ",".join([str(b) for b in bounds])
if chunks > 1:
request["startIndex"] = i * pagesize
request["count"] = pagesize
param_dicts.append(request)
return param_dicts
|
Define the getfeature request parameters required to download a dataset
References:
- http://www.opengeospatial.org/standards/wfs
- http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html
- http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L120-L167
|
[
"def get_count(dataset, query=None):\n \"\"\"Ask DataBC WFS how many features there are in a table/query\n \"\"\"\n # https://gis.stackexchange.com/questions/45101/only-return-the-numberoffeatures-in-a-wfs-query\n table = validate_name(dataset)\n payload = {\n \"service\": \"WFS\",\n \"version\": \"2.0.0\",\n \"request\": \"GetFeature\",\n \"typeName\": table,\n \"resultType\": \"hits\",\n \"outputFormat\": \"json\",\n }\n if query:\n payload[\"CQL_FILTER\"] = query\n r = requests.get(bcdata.WFS_URL, params=payload)\n return int(ET.fromstring(r.text).attrib[\"numberMatched\"])\n",
"def validate_name(dataset):\n \"\"\"Check wfs/cache and the bcdc api to see if dataset name is valid\n \"\"\"\n if dataset in list_tables():\n return dataset\n else:\n return bcdc_package_show(dataset)[\"object_name\"]\n",
"def get_sortkey(table):\n \"\"\"Get a field to sort by\n \"\"\"\n # Just pick the first column in the table in alphabetical order.\n # Ideally we would get the primary key from bcdc api, but it doesn't\n # seem to be available\n wfs = WebFeatureService(url=bcdata.OWS_URL, version=\"2.0.0\")\n return sorted(wfs.get_schema(\"pub:\" + table)[\"properties\"].keys())[0]\n"
] |
from datetime import datetime
from datetime import timedelta
import json
import logging
import math
import os
from pathlib import Path
import sys
import warnings
import xml.etree.ElementTree as ET
from concurrent.futures import ThreadPoolExecutor
from owslib.wfs import WebFeatureService
import requests
import bcdata
if not sys.warnoptions:
warnings.simplefilter("ignore")
log = logging.getLogger(__name__)
def get_sortkey(table):
    """Return a column name of *table* usable as a WFS sort key.

    The alphabetically first property in the table schema is used; the
    primary key would be preferable, but it does not seem to be available
    from the BCDC API.
    """
    wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
    columns = wfs.get_schema("pub:" + table)["properties"].keys()
    return min(columns)
def check_cache(path):
    """Return True if the dataset-list cache at *path* is missing or stale.

    The cache is considered stale once its modification time is more than
    30 days in the past.
    """
    if not os.path.exists(path):
        return True
    modified = datetime.fromtimestamp(os.path.getmtime(path))
    return modified < datetime.now() - timedelta(days=30)
def bcdc_package_show(package):
    """Query DataBC Catalogue API about given package.

    Raises ValueError when the catalogue does not know the package.
    """
    r = requests.get(bcdata.BCDC_API_URL + "package_show", params={"id": package})
    if r.status_code != 200:
        raise ValueError("{d} is not present in DataBC API list".format(d=package))
    return r.json()["result"]
def validate_name(dataset):
    """Resolve *dataset* to a valid WFS table name.

    Returns the name unchanged when it is already a known WFS table;
    otherwise asks the DataBC Catalogue for the package's object_name.
    """
    known_tables = list_tables()
    if dataset in known_tables:
        return dataset
    return bcdc_package_show(dataset)["object_name"]
def list_tables(refresh=False, cache_file=None):
    """Return a list of all datasets available via WFS.

    Results are cached (default ``~/.bcdata``, override with *cache_file*).
    The cache is rebuilt when it is absent, older than 30 days, or
    *refresh* is True.
    """
    # default cache listing all objects available is
    # ~/.bcdata
    if not cache_file:
        cache_file = os.path.join(str(Path.home()), ".bcdata")
    # regenerate the cache if:
    # - the cache file doesn't exist
    # - we force a refresh
    # - the cache is older than 1 month
    if refresh or check_cache(cache_file):
        wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
        # Remove the "pub:" workspace prefix explicitly. The previous
        # i.strip("pub:") stripped any run of the characters p/u/b/:
        # from *both* ends of the name, which can silently mangle names
        # that begin or end with those characters.
        bcdata_objects = [
            i[len("pub:"):] if i.startswith("pub:") else i
            for i in wfs.contents
        ]
        with open(cache_file, "w") as outfile:
            json.dump(sorted(bcdata_objects), outfile)
    else:
        with open(cache_file, "r") as infile:
            bcdata_objects = json.load(infile)
    return bcdata_objects
def get_count(dataset, query=None):
    """Ask DataBC WFS how many features there are in a table/query."""
    # resultType=hits asks the server for a count only, no features:
    # https://gis.stackexchange.com/questions/45101/only-return-the-numberoffeatures-in-a-wfs-query
    table = validate_name(dataset)
    payload = {
        "service": "WFS",
        "version": "2.0.0",
        "request": "GetFeature",
        "typeName": table,
        "resultType": "hits",
        "outputFormat": "json",
    }
    if query:
        payload["CQL_FILTER"] = query
    response = requests.get(bcdata.WFS_URL, params=payload)
    # a hits response comes back as XML; the count is an attribute
    root = ET.fromstring(response.text)
    return int(root.attrib["numberMatched"])
def make_request(parameters):
    """Submit a getfeature request to DataBC WFS and return features."""
    response = requests.get(bcdata.WFS_URL, params=parameters)
    return response.json()["features"]
def get_data(
    dataset,
    query=None,
    crs="epsg:4326",
    bounds=None,
    sortby=None,
    pagesize=10000,
    max_workers=5,
):
    """Get GeoJSON featurecollection from DataBC WFS.

    Pages are fetched concurrently (up to *max_workers* requests at a
    time) and their feature lists concatenated into a single collection.
    """
    param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
    features = []
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        for page in executor.map(make_request, param_dicts):
            features.extend(page)
    return {"type": "FeatureCollection", "features": features}
def get_features(
    dataset,
    query=None,
    crs="epsg:4326",
    bounds=None,
    sortby=None,
    pagesize=10000,
    max_workers=5,
):
    """Yield features from DataBC WFS one at a time.

    Pages are fetched concurrently (up to *max_workers* requests at a
    time); features are yielded in page order.
    """
    param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        for page in executor.map(make_request, param_dicts):
            yield from page
|
smnorris/bcdata
|
bcdata/wfs.py
|
get_data
|
python
|
def get_data(
dataset,
query=None,
crs="epsg:4326",
bounds=None,
sortby=None,
pagesize=10000,
max_workers=5,
):
param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
results = executor.map(make_request, param_dicts)
outjson = dict(type="FeatureCollection", features=[])
for result in results:
outjson["features"] += result
return outjson
|
Get GeoJSON featurecollection from DataBC WFS
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L170-L189
|
[
"def define_request(\n dataset, query=None, crs=\"epsg:4326\", bounds=None, sortby=None, pagesize=10000\n):\n \"\"\"Define the getfeature request parameters required to download a dataset\n\n References:\n - http://www.opengeospatial.org/standards/wfs\n - http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html\n - http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html\n \"\"\"\n # validate the table name and find out how many features it holds\n table = validate_name(dataset)\n n = bcdata.get_count(table, query=query)\n\n # DataBC WFS getcapabilities says that it supports paging,\n # and the spec says that responses should include 'next URI'\n # (section 7.7.4.4.1)....\n # But I do not see any next uri in the responses. Instead of following\n # the paged urls, for datasets with >10k records, just generate urls\n # based on number of features in the dataset.\n chunks = math.ceil(n / pagesize)\n\n # if making several requests, we need to sort by something\n if chunks > 1 and not sortby:\n sortby = get_sortkey(table)\n\n # build the request parameters for each chunk\n param_dicts = []\n for i in range(chunks):\n request = {\n \"service\": \"WFS\",\n \"version\": \"2.0.0\",\n \"request\": \"GetFeature\",\n \"typeName\": table,\n \"outputFormat\": \"json\",\n \"SRSNAME\": crs,\n }\n if sortby:\n request[\"sortby\"] = sortby\n if query:\n request[\"CQL_FILTER\"] = query\n if bounds:\n request[\"bbox\"] = \",\".join([str(b) for b in bounds])\n if chunks > 1:\n request[\"startIndex\"] = i * pagesize\n request[\"count\"] = pagesize\n param_dicts.append(request)\n return param_dicts\n"
] |
from datetime import datetime
from datetime import timedelta
import json
import logging
import math
import os
from pathlib import Path
import sys
import warnings
import xml.etree.ElementTree as ET
from concurrent.futures import ThreadPoolExecutor
from owslib.wfs import WebFeatureService
import requests
import bcdata
if not sys.warnoptions:
warnings.simplefilter("ignore")
log = logging.getLogger(__name__)
def get_sortkey(table):
    """Return a field name suitable for sorting the given table."""
    # The BCDC API does not expose a primary key, so fall back to the
    # alphabetically first column reported by the WFS schema.
    service = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
    columns = service.get_schema("pub:" + table)["properties"].keys()
    return sorted(columns)[0]
def check_cache(path):
    """Return True if the cache file is missing or more than 30 days old."""
    if not os.path.exists(path):
        return True
    # Compare the file's modification time against a 30-day cutoff.
    cutoff = datetime.now() - timedelta(days=30)
    modified = datetime.fromtimestamp(os.path.getmtime(path))
    return modified < cutoff
def bcdc_package_show(package):
    """Look up a package in the DataBC Catalogue API and return its metadata."""
    r = requests.get(bcdata.BCDC_API_URL + "package_show", params={"id": package})
    # A non-200 response means the catalogue does not know this package.
    if r.status_code != 200:
        raise ValueError("{d} is not present in DataBC API list".format(d=package))
    return r.json()["result"]
def validate_name(dataset):
    """Resolve a dataset name to a valid WFS object name."""
    # Accept the name as-is when it is already a known WFS table;
    # otherwise ask the catalogue API for the object name.
    if dataset in list_tables():
        return dataset
    return bcdc_package_show(dataset)["object_name"]
def list_tables(refresh=False, cache_file=None):
    """Return a list of all datasets available via WFS.

    The listing is cached (default ~/.bcdata); it is regenerated when the
    cache file is absent, older than 30 days, or when refresh=True.
    """
    # default cache listing all objects available is
    # ~/.bcdata
    if not cache_file:
        cache_file = os.path.join(str(Path.home()), ".bcdata")
    # regenerate the cache if:
    # - the cache file doesn't exist
    # - we force a refresh
    # - the cache is older than 1 month
    if refresh or check_cache(cache_file):
        wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
        # BUG FIX: str.strip("pub:") removes any of the characters
        # p/u/b/: from BOTH ends of the name, mangling names that start
        # or end with those letters -- remove the literal "pub:" prefix
        # instead.
        bcdata_objects = [
            i[len("pub:"):] if i.startswith("pub:") else i
            for i in list(wfs.contents)
        ]
        with open(cache_file, "w") as outfile:
            json.dump(sorted(bcdata_objects), outfile)
    else:
        with open(cache_file, "r") as infile:
            bcdata_objects = json.load(infile)
    return bcdata_objects
def get_count(dataset, query=None):
    """Return the number of features in a table (optionally filtered by query)."""
    # https://gis.stackexchange.com/questions/45101/only-return-the-numberoffeatures-in-a-wfs-query
    table = validate_name(dataset)
    params = {
        "service": "WFS",
        "version": "2.0.0",
        "request": "GetFeature",
        "typeName": table,
        "resultType": "hits",
        "outputFormat": "json",
    }
    if query:
        params["CQL_FILTER"] = query
    response = requests.get(bcdata.WFS_URL, params=params)
    # A 'hits' request returns XML; the count lives in the root element's
    # numberMatched attribute.
    return int(ET.fromstring(response.text).attrib["numberMatched"])
def make_request(parameters):
    """Submit a getfeature request to DataBC WFS and return the feature list."""
    response = requests.get(bcdata.WFS_URL, params=parameters)
    return response.json()["features"]
def define_request(
    dataset, query=None, crs="epsg:4326", bounds=None, sortby=None, pagesize=10000
):
    """Build the list of getfeature request parameter dicts for a dataset.

    References:
    - http://www.opengeospatial.org/standards/wfs
    - http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html
    - http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html
    """
    # validate the table name and find out how many features it holds
    table = validate_name(dataset)
    n = bcdata.get_count(table, query=query)
    # The WFS advertises paging but responses carry no 'next URI'
    # (WFS 2.0 spec section 7.7.4.4.1), so chunked requests are built
    # up-front from the total feature count.
    chunks = math.ceil(n / pagesize)
    # Paged results are only deterministic when sorted.
    if chunks > 1 and not sortby:
        sortby = get_sortkey(table)
    # Build the parameters common to every chunk once...
    base = {
        "service": "WFS",
        "version": "2.0.0",
        "request": "GetFeature",
        "typeName": table,
        "outputFormat": "json",
        "SRSNAME": crs,
    }
    if sortby:
        base["sortby"] = sortby
    if query:
        base["CQL_FILTER"] = query
    if bounds:
        base["bbox"] = ",".join(str(b) for b in bounds)
    # ...then add per-chunk paging parameters.
    param_dicts = []
    for i in range(chunks):
        request = dict(base)
        if chunks > 1:
            request["startIndex"] = i * pagesize
            request["count"] = pagesize
        param_dicts.append(request)
    return param_dicts
def get_features(
    dataset,
    query=None,
    crs="epsg:4326",
    bounds=None,
    sortby=None,
    pagesize=10000,
    max_workers=5,
):
    """Yield individual features from DataBC WFS, fetching pages concurrently."""
    param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        for page in executor.map(make_request, param_dicts):
            yield from page
|
smnorris/bcdata
|
bcdata/wfs.py
|
get_features
|
python
|
def get_features(
dataset,
query=None,
crs="epsg:4326",
bounds=None,
sortby=None,
pagesize=10000,
max_workers=5,
):
param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
for result in executor.map(make_request, param_dicts):
for feature in result:
yield feature
|
Yield features from DataBC WFS
|
train
|
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L192-L208
|
[
"def define_request(\n dataset, query=None, crs=\"epsg:4326\", bounds=None, sortby=None, pagesize=10000\n):\n \"\"\"Define the getfeature request parameters required to download a dataset\n\n References:\n - http://www.opengeospatial.org/standards/wfs\n - http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html\n - http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html\n \"\"\"\n # validate the table name and find out how many features it holds\n table = validate_name(dataset)\n n = bcdata.get_count(table, query=query)\n\n # DataBC WFS getcapabilities says that it supports paging,\n # and the spec says that responses should include 'next URI'\n # (section 7.7.4.4.1)....\n # But I do not see any next uri in the responses. Instead of following\n # the paged urls, for datasets with >10k records, just generate urls\n # based on number of features in the dataset.\n chunks = math.ceil(n / pagesize)\n\n # if making several requests, we need to sort by something\n if chunks > 1 and not sortby:\n sortby = get_sortkey(table)\n\n # build the request parameters for each chunk\n param_dicts = []\n for i in range(chunks):\n request = {\n \"service\": \"WFS\",\n \"version\": \"2.0.0\",\n \"request\": \"GetFeature\",\n \"typeName\": table,\n \"outputFormat\": \"json\",\n \"SRSNAME\": crs,\n }\n if sortby:\n request[\"sortby\"] = sortby\n if query:\n request[\"CQL_FILTER\"] = query\n if bounds:\n request[\"bbox\"] = \",\".join([str(b) for b in bounds])\n if chunks > 1:\n request[\"startIndex\"] = i * pagesize\n request[\"count\"] = pagesize\n param_dicts.append(request)\n return param_dicts\n"
] |
from datetime import datetime
from datetime import timedelta
import json
import logging
import math
import os
from pathlib import Path
import sys
import warnings
import xml.etree.ElementTree as ET
from concurrent.futures import ThreadPoolExecutor
from owslib.wfs import WebFeatureService
import requests
import bcdata
if not sys.warnoptions:
warnings.simplefilter("ignore")
log = logging.getLogger(__name__)
def get_sortkey(table):
    """Return a field name suitable for sorting the given table."""
    # The BCDC API does not expose a primary key, so fall back to the
    # alphabetically first column reported by the WFS schema.
    service = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
    columns = service.get_schema("pub:" + table)["properties"].keys()
    return sorted(columns)[0]
def check_cache(path):
    """Return True if the cache file is missing or more than 30 days old."""
    if not os.path.exists(path):
        return True
    # Compare the file's modification time against a 30-day cutoff.
    cutoff = datetime.now() - timedelta(days=30)
    modified = datetime.fromtimestamp(os.path.getmtime(path))
    return modified < cutoff
def bcdc_package_show(package):
    """Look up a package in the DataBC Catalogue API and return its metadata."""
    r = requests.get(bcdata.BCDC_API_URL + "package_show", params={"id": package})
    # A non-200 response means the catalogue does not know this package.
    if r.status_code != 200:
        raise ValueError("{d} is not present in DataBC API list".format(d=package))
    return r.json()["result"]
def validate_name(dataset):
    """Resolve a dataset name to a valid WFS object name."""
    # Accept the name as-is when it is already a known WFS table;
    # otherwise ask the catalogue API for the object name.
    if dataset in list_tables():
        return dataset
    return bcdc_package_show(dataset)["object_name"]
def list_tables(refresh=False, cache_file=None):
    """Return a list of all datasets available via WFS.

    The listing is cached (default ~/.bcdata); it is regenerated when the
    cache file is absent, older than 30 days, or when refresh=True.
    """
    # default cache listing all objects available is
    # ~/.bcdata
    if not cache_file:
        cache_file = os.path.join(str(Path.home()), ".bcdata")
    # regenerate the cache if:
    # - the cache file doesn't exist
    # - we force a refresh
    # - the cache is older than 1 month
    if refresh or check_cache(cache_file):
        wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
        # BUG FIX: str.strip("pub:") removes any of the characters
        # p/u/b/: from BOTH ends of the name, mangling names that start
        # or end with those letters -- remove the literal "pub:" prefix
        # instead.
        bcdata_objects = [
            i[len("pub:"):] if i.startswith("pub:") else i
            for i in list(wfs.contents)
        ]
        with open(cache_file, "w") as outfile:
            json.dump(sorted(bcdata_objects), outfile)
    else:
        with open(cache_file, "r") as infile:
            bcdata_objects = json.load(infile)
    return bcdata_objects
def get_count(dataset, query=None):
    """Return the number of features in a table (optionally filtered by query)."""
    # https://gis.stackexchange.com/questions/45101/only-return-the-numberoffeatures-in-a-wfs-query
    table = validate_name(dataset)
    params = {
        "service": "WFS",
        "version": "2.0.0",
        "request": "GetFeature",
        "typeName": table,
        "resultType": "hits",
        "outputFormat": "json",
    }
    if query:
        params["CQL_FILTER"] = query
    response = requests.get(bcdata.WFS_URL, params=params)
    # A 'hits' request returns XML; the count lives in the root element's
    # numberMatched attribute.
    return int(ET.fromstring(response.text).attrib["numberMatched"])
def make_request(parameters):
    """Submit a getfeature request to DataBC WFS and return the feature list."""
    response = requests.get(bcdata.WFS_URL, params=parameters)
    return response.json()["features"]
def define_request(
    dataset, query=None, crs="epsg:4326", bounds=None, sortby=None, pagesize=10000
):
    """Build the list of getfeature request parameter dicts for a dataset.

    References:
    - http://www.opengeospatial.org/standards/wfs
    - http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html
    - http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html
    """
    # validate the table name and find out how many features it holds
    table = validate_name(dataset)
    n = bcdata.get_count(table, query=query)
    # The WFS advertises paging but responses carry no 'next URI'
    # (WFS 2.0 spec section 7.7.4.4.1), so chunked requests are built
    # up-front from the total feature count.
    chunks = math.ceil(n / pagesize)
    # Paged results are only deterministic when sorted.
    if chunks > 1 and not sortby:
        sortby = get_sortkey(table)
    # Build the parameters common to every chunk once...
    base = {
        "service": "WFS",
        "version": "2.0.0",
        "request": "GetFeature",
        "typeName": table,
        "outputFormat": "json",
        "SRSNAME": crs,
    }
    if sortby:
        base["sortby"] = sortby
    if query:
        base["CQL_FILTER"] = query
    if bounds:
        base["bbox"] = ",".join(str(b) for b in bounds)
    # ...then add per-chunk paging parameters.
    param_dicts = []
    for i in range(chunks):
        request = dict(base)
        if chunks > 1:
            request["startIndex"] = i * pagesize
            request["count"] = pagesize
        param_dicts.append(request)
    return param_dicts
def get_data(
    dataset,
    query=None,
    crs="epsg:4326",
    bounds=None,
    sortby=None,
    pagesize=10000,
    max_workers=5,
):
    """Return a GeoJSON FeatureCollection dict from DataBC WFS."""
    param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
    features = []
    # Fetch pages concurrently and flatten them into one feature list.
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        for page in executor.map(make_request, param_dicts):
            features.extend(page)
    return {"type": "FeatureCollection", "features": features}
|
aquatix/ns-api
|
ns_api.py
|
simple_time
|
python
|
def simple_time(value):
    """Format a datetime or timedelta object to a string of format HH:MM."""
    if isinstance(value, timedelta):
        # str(timedelta) looks like 'H:MM:SS'; keep only hours and minutes.
        hours_and_minutes = str(value).split(':')[:2]
        return ':'.join(hours_and_minutes)
    return datetime_to_string(value, '%H:%M')
|
Format a datetime or timedelta object to a string of format HH:MM
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L32-L38
| null |
"""
Library to query the official Dutch railways API
"""
from __future__ import print_function
import collections
import json
import time
from datetime import datetime, timedelta
import pytz
import requests
import xmltodict
from future.utils import python_2_unicode_compatible
from pytz.tzinfo import StaticTzInfo
from requests.auth import HTTPBasicAuth
## ns-api library version
__version__ = '2.7.5'
## Date/time helpers
NS_DATETIME = "%Y-%m-%dT%H:%M:%S%z"
def datetime_to_string(timestamp, dt_format='%Y-%m-%d %H:%M:%S'):
    """Render a datetime object as a string using dt_format."""
    # format() delegates to datetime.__format__, i.e. strftime.
    return format(timestamp, dt_format)
## Timezone helpers
def is_dst(zonename):
    """Return True when Daylight Saving Time is in effect in this timezone."""
    zone = pytz.timezone(zonename)
    localized_now = pytz.utc.localize(datetime.utcnow()).astimezone(zone)
    # dst() is a zero timedelta outside the DST period.
    return localized_now.dst() != timedelta(0)
class OffsetTime(StaticTzInfo):
    """
    A dumb timezone based on offset such as +0530, -0600, etc.
    """
    def __init__(self, offset):
        # '+0530' -> hours=+5, minutes=+30; the sign character is applied
        # to the minute component as well so negative offsets stay negative.
        hours = int(offset[:3])
        minutes = int(offset[0] + offset[3:])
        self._utcoffset = timedelta(hours=hours, minutes=minutes)
def load_datetime(value, dt_format):
    """Create a (timezone-aware, when dt_format ends in %z) datetime object."""
    if not dt_format.endswith('%z'):
        return datetime.strptime(value, dt_format)
    # strptime is not used for the offset here; split the trailing
    # '+HHMM'/'HH:MM' off the value and apply it via OffsetTime.
    dt_format = dt_format[:-2]
    offset = value[-5:]
    value = value[:-5]
    if offset != offset.replace(':', ''):
        # strip : from HHMM if needed (isoformat() adds it between HH and MM)
        offset = '+' + offset.replace(':', '')
        value = value[:-1]
    return OffsetTime(offset).localize(datetime.strptime(value, dt_format))
## List helpers
def list_to_json(source_list):
    """Serialise all the items in source_list to json."""
    return [item.to_json() for item in source_list]
def list_from_json(source_list_json):
    """Deserialise all the items in source_list_json back into model objects."""
    result = []
    if source_list_json == [] or source_list_json == None:
        return result
    # Map the serialised 'class_name' marker back to the model class.
    model_classes = {
        'Departure': Departure,
        'Disruption': Disruption,
        'Station': Station,
        'Trip': Trip,
        'TripRemark': TripRemark,
        'TripStop': TripStop,
        'TripSubpart': TripSubpart,
    }
    for list_item in source_list_json:
        item = json.loads(list_item)
        try:
            class_name = item['class_name']
        except KeyError:
            print('Unrecognised item with no class_name, skipping')
            continue
        model_class = model_classes.get(class_name)
        if model_class is None:
            print('Unrecognised Class ' + class_name + ', skipping')
            continue
        temp = model_class()
        temp.from_json(list_item)
        result.append(temp)
    return result
def list_diff(list_a, list_b):
    """Return the items from list_b that do not appear in list_a."""
    return [item for item in list_b if item not in list_a]
def list_same(list_a, list_b):
    """Return the items from list_b that also appear in list_a."""
    return [item for item in list_b if item in list_a]
def list_merge(list_a, list_b):
    """Merge two lists without duplicating items, preserving order.

    Items from list_a come first, then unseen items from list_b.
    Membership is tested with ==, so items need not be hashable (the
    model classes define __eq__ without __hash__).
    """
    result = []
    for source in (list_a, list_b):
        for item in source:
            if item not in result:
                result.append(item)
    return result
## NS API objects
@python_2_unicode_compatible
class BaseObject(object):
    """
    Base object with useful functions
    """
    # Serialisation contract: __getstate__ tags the state dict with
    # 'class_name' so list_from_json can rebuild the right subclass, and
    # __setstate__ strips that tag again before restoring the instance.
    def __getstate__(self):
        # Copy so the live instance dict is not mutated by the tag below.
        result = self.__dict__.copy()
        result['class_name'] = self.__class__.__name__
        return result
    def to_json(self):
        """
        Create a JSON representation of this model
        """
        #return json.dumps(self.__getstate__())
        return json.dumps(self.__getstate__(), ensure_ascii=False)
    def __setstate__(self, source_dict):
        if not source_dict:
            # Somehow the source is None
            return
        del source_dict['class_name']
        self.__dict__ = source_dict
    def from_json(self, source_json):
        """
        Parse a JSON representation of this model back to, well, the model
        """
        #source_dict = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(source_json)
        source_dict = json.JSONDecoder().decode(source_json)
        self.__setstate__(source_dict)
    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3; the list helpers above rely on == only
        return self.__dict__ == other.__dict__
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        # Subclasses must provide their own string representation
        raise NotImplementedError('subclasses must override __str__()')
class Station(BaseObject):
    """
    Information on a railway station
    """
    def __init__(self, stat_dict=None):
        # Allow a bare Station() so from_json/__setstate__ can populate it
        if stat_dict is None:
            return
        self.key = stat_dict['Code']
        self.code = stat_dict['Code']
        self.uic_code = stat_dict['UICCode']
        self.stationtype = stat_dict['Type']
        self.names = {
            'short': stat_dict['Namen']['Kort'],
            'middle': stat_dict['Namen']['Middel'],
            'long': stat_dict['Namen']['Lang']
        }
        self.country = stat_dict['Land']
        self.lat = stat_dict['Lat']
        self.lon = stat_dict['Lon']
        self.synonyms = []
        try:
            raw_synonyms = stat_dict['Synoniemen']['Synoniem']
            # A single synonym arrives as a plain string; normalise to list
            if isinstance(raw_synonyms, str):
                raw_synonyms = [raw_synonyms]
            for synonym in raw_synonyms:
                self.synonyms.append(synonym)
        except TypeError:
            # 'Synoniemen' is None when the station has no synonyms;
            # subscripting None raises TypeError
            self.synonyms = []
    def __str__(self):
        return u'<Station> {0} {1}'.format(self.code, self.names['long'])
class Disruption(BaseObject):
    """
    Planned and unplanned disruptions of the railroad traffic
    """
    def __init__(self, part_dict=None):
        # Allow a bare Disruption() so from_json/__setstate__ can populate it
        if part_dict is None:
            return
        self.key = part_dict['id']
        self.line = part_dict['Traject']
        self.message = part_dict['Bericht']
        # Optional fields default to None when absent from the API payload
        # (dict.get replaces the previous try/except KeyError boilerplate)
        self.reason = part_dict.get('Reden')
        self.cause = part_dict.get('Oorzaak')
        self.delay_text = part_dict.get('Vertraging')
        try:
            self.timestamp = load_datetime(part_dict['Datum'], NS_DATETIME)
        except Exception:
            # BUG FIX: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit. A missing or unparseable
            # 'Datum' still yields timestamp None.
            self.timestamp = None
    def __getstate__(self):
        # Serialise the timestamp as an ISO-8601 string
        result = super(Disruption, self).__getstate__()
        result['timestamp'] = result['timestamp'].isoformat()
        return result
    def __setstate__(self, source_dict):
        super(Disruption, self).__setstate__(source_dict)
        self.timestamp = load_datetime(self.timestamp, NS_DATETIME)
    def __str__(self):
        return u'<Disruption> {0}'.format(self.line)
        #return u'<Disruption> {0}'.format(self.key)
class Departure(BaseObject):
    """
    Information on a departing train on a certain station
    """
    def __init__(self, departure_dict=None):
        # Allow a bare Departure() so from_json/__setstate__ can populate it
        if departure_dict is None:
            return
        self.key = departure_dict['RitNummer'] + '_' + departure_dict['VertrekTijd']
        self.trip_number = departure_dict['RitNummer']
        self.departure_time = load_datetime(departure_dict['VertrekTijd'], NS_DATETIME)
        try:
            self.has_delay = True
            self.departure_delay = departure_dict['VertrekVertraging']
            self.departure_delay_text = departure_dict['VertrekVertragingTekst']
        except KeyError:
            # No delay fields in the payload: the train is on time
            self.has_delay = False
        self.departure_platform = departure_dict['VertrekSpoor']
        self.departure_platform_changed = departure_dict['VertrekSpoor']['@wijziging']
        self.destination = departure_dict['EindBestemming']
        try:
            self.route_text = departure_dict['RouteTekst']
        except KeyError:
            self.route_text = None
        # BUG FIX: these four assignments used 'departure_dict = [...]',
        # which rebound departure_dict to a one-element list instead of
        # subscripting it; subscript the dict as intended.
        self.train_type = departure_dict['TreinSoort']
        self.carrier = departure_dict['Vervoerder']
        try:
            self.journey_tip = departure_dict['ReisTip']
        except KeyError:
            self.journey_tip = None
        try:
            self.remarks = departure_dict['Opmerkingen']
        except KeyError:
            self.remarks = []
    def __getstate__(self):
        # Serialise the departure time as an ISO-8601 string
        result = super(Departure, self).__getstate__()
        result['departure_time'] = result['departure_time'].isoformat()
        return result
    def __setstate__(self, source_dict):
        super(Departure, self).__setstate__(source_dict)
        # BUG FIX: the serialised dict stores 'departure_time' (set in
        # __getstate__), not the raw API key 'VertrekTijd'; parse the ISO
        # string back, matching Disruption/TripStop.__setstate__.
        self.departure_time = load_datetime(self.departure_time, NS_DATETIME)
    @property
    def delay(self):
        """
        Return the delay of the train for this instance, or None when on time
        """
        if self.has_delay:
            return self.departure_delay
        else:
            return None
    def __str__(self):
        return u'<Departure> trip_number: {0} {1} {2}'.format(self.trip_number, self.destination, self.departure_time)
class TripRemark(BaseObject):
    """
    Notes on this route, generally about disruptions
    """
    def __init__(self, part_dict=None):
        # Allow a bare TripRemark() so from_json/__setstate__ can populate it
        if part_dict is None:
            return
        self.key = part_dict['Id']
        # The API encodes booleans as the strings 'true'/'false'
        self.is_grave = part_dict['Ernstig'] != 'false'
        self.message = part_dict['Text']
    def __str__(self):
        return u'<TripRemark> {0} {1}'.format(self.is_grave, self.message)
class TripStop(BaseObject):
    """
    Information on a stop on a route (station, time, platform)
    """
    def __init__(self, part_dict=None):
        # Allow a bare TripStop() so from_json/__setstate__ can populate it
        if part_dict is None:
            return
        self.name = part_dict['Naam']
        try:
            self.time = load_datetime(part_dict['Tijd'], NS_DATETIME)
            self.key = simple_time(self.time) + '_' + self.name
        except TypeError:
            # In some rare cases part_dict['Tijd'] can be None
            #self.time = datetime(2000, 1, 1, 0, 0, 0)
            self.time = None
            self.key = None
        self.platform_changed = False
        try:
            self.platform = part_dict['Spoor']['#text']
            if part_dict['Spoor']['@wijziging'] == 'true':
                self.platform_changed = True
        except KeyError:
            # No 'Spoor' entry (or missing subkeys): platform unknown
            self.platform = None
        try:
            self.delay = part_dict['VertrekVertraging']
        except KeyError:
            self.delay = None
    def __getstate__(self):
        # NOTE(review): self.time is None for stops without a time, so
        # .isoformat() would raise AttributeError here -- confirm callers
        # never serialise such stops
        result = super(TripStop, self).__getstate__()
        result['time'] = result['time'].isoformat()
        return result
    def __setstate__(self, source_dict):
        super(TripStop, self).__setstate__(source_dict)
        self.time = load_datetime(self.time, NS_DATETIME)
    def __str__(self):
        return u'<TripStop> {0}'.format(self.name)
class TripSubpart(BaseObject):
    """
    Sub route; each part means a transfer
    """
    def __init__(self, part_dict=None):
        # Allow a bare TripSubpart() so from_json/__setstate__ can populate it
        if part_dict is None:
            return
        self.trip_type = part_dict['@reisSoort']
        self.transporter = part_dict['Vervoerder']
        self.transport_type = part_dict['VervoerType']
        self.journey_id = part_dict['RitNummer']
        # VOLGENS-PLAN, GEANNULEERD (=vervallen trein), GEWIJZIGD (=planaanpassing in de bijsturing op de dag zelf),
        # OVERSTAP-NIET-MOGELIJK, VERTRAAGD, NIEUW (=extra trein)
        self.status = part_dict['Status']
        self.going = True
        self.has_delay = False
        if self.status == 'GEANNULEERD':
            self.going = False
        if self.status == 'GEANNULEERD' or self.status == 'GEWIJZIGD' or self.status == 'VERTRAAGD':
            self.has_delay = True
        try:
            self.disruption_key = part_dict['OngeplandeStoringId']
        except KeyError:
            self.disruption_key = None
        self.stops = []
        raw_stops = part_dict['ReisStop']
        for raw_stop in raw_stops:
            stop = TripStop(raw_stop)
            self.stops.append(stop)
    @property
    def destination(self):
        # Name of the final stop of this subpart
        return self.stops[-1].name
    @property
    def departure_time(self):
        # Time of the first stop of this subpart
        return self.stops[0].time
    def has_departure_delay(self, arrival_check=True):
        # NOTE(review): when arrival_check is False and a delay exists,
        # this only returns from inside the loop when the last stop has
        # delay == False; other loop exits fall off the end and return
        # None (falsy). Left as-is -- the control flow is intricate enough
        # that a rewrite risks changing behaviour callers rely on.
        if arrival_check==False and self.has_delay:
            # Check whether one or more stops have delay, except last one
            delay_found = False
            for stop in self.stops:
                if stop.delay and stop:
                    delay_found = True
                elif stop.delay == False and stop == self.stops[-1]:
                    # Last stop and it doesn't have a delay
                    return delay_found
        else:
            return self.has_delay
    def __getstate__(self):
        # Nested TripStop objects are serialised as JSON strings
        result = super(TripSubpart, self).__getstate__()
        stops = []
        for stop in self.stops:
            stops.append(stop.to_json())
        result['stops'] = stops
        return result
    def __setstate__(self, source_dict):
        super(TripSubpart, self).__setstate__(source_dict)
        trip_stops = []
        for raw_stop in self.stops:
            trip_stop = TripStop()
            trip_stop.from_json(raw_stop)
            trip_stops.append(trip_stop)
        self.stops = trip_stops
    def __str__(self):
        return u'<TripSubpart> [{0}] {1} {2} {3} {4}'.format(self.going, self.journey_id, self.trip_type, self.transport_type, self.status)
class Trip(BaseObject):
    """
    Suggested route for the provided departure/destination combination
    """
    def __init__(self, trip_dict=None, datetime=None):
        # NOTE(review): the 'datetime' parameter is the requested travel
        # time and shadows the datetime class imported at module level
        if trip_dict is None:
            return
        # self.key = ??
        try:
            # VOLGENS-PLAN, GEWIJZIGD, VERTRAAGD, NIEUW, NIET-OPTIMAAL, NIET-MOGELIJK, PLAN-GEWIJZIGD
            self.status = trip_dict['Status']
        except KeyError:
            self.status = None
        self.nr_transfers = trip_dict['AantalOverstappen']
        try:
            self.travel_time_planned = trip_dict['GeplandeReisTijd']
            self.going = True
        except KeyError:
            # Train has been cancelled
            self.travel_time_planned = None
            self.going = False
        if self.status == 'NIET-MOGELIJK':
            # Train has been cancelled
            self.going = False
        self.travel_time_actual = trip_dict['ActueleReisTijd']
        self.is_optimal = True if trip_dict['Optimaal'] == 'true' else False
        dt_format = "%Y-%m-%dT%H:%M:%S%z"
        self.requested_time = datetime
        # NOTE(review): the four bare 'except:' clauses below swallow all
        # exceptions, not just missing keys or parse failures
        try:
            self.departure_time_planned = load_datetime(trip_dict['GeplandeVertrekTijd'], dt_format)
        except:
            self.departure_time_planned = None
        try:
            self.departure_time_actual = load_datetime(trip_dict['ActueleVertrekTijd'], dt_format)
        except:
            self.departure_time_actual = None
        try:
            self.arrival_time_planned = load_datetime(trip_dict['GeplandeAankomstTijd'], dt_format)
        except:
            self.arrival_time_planned = None
        try:
            self.arrival_time_actual = load_datetime(trip_dict['ActueleAankomstTijd'], dt_format)
        except:
            self.arrival_time_actual = None
        self.trip_parts = []
        raw_parts = trip_dict['ReisDeel']
        # A single travel segment arrives as a dict; normalise to a list
        if isinstance(trip_dict['ReisDeel'], collections.OrderedDict):
            raw_parts = [trip_dict['ReisDeel']]
        for part in raw_parts:
            trip_part = TripSubpart(part)
            self.trip_parts.append(trip_part)
        try:
            raw_remarks = trip_dict['Melding']
            self.trip_remarks = []
            if isinstance(raw_remarks, collections.OrderedDict):
                raw_remarks = [raw_remarks]
            for remark in raw_remarks:
                trip_remark = TripRemark(remark)
                self.trip_remarks.append(trip_remark)
        except KeyError:
            self.trip_remarks = []
    @property
    def departure(self):
        # Name of the first stop of the first travel segment
        return self.trip_parts[0].stops[0].name
    @property
    def destination(self):
        # Name of the last stop of the last travel segment
        return self.trip_parts[-1].stops[-1].name
    @property
    def delay(self):
        """
        Return the delay of the train for this instance
        """
        delay = {'departure_time': None, 'departure_delay': None, 'requested_differs': None,
                 'remarks': self.trip_remarks, 'parts': []}
        if self.departure_time_actual > self.departure_time_planned:
            delay['departure_delay'] = self.departure_time_actual - self.departure_time_planned
            delay['departure_time'] = self.departure_time_actual
        if self.requested_time != self.departure_time_actual:
            delay['requested_differs'] = self.departure_time_actual
        for part in self.trip_parts:
            if part.has_delay:
                delay['parts'].append(part)
        return delay
    def has_delay(self, arrival_check=True):
        # Delayed when the status deviates from plan, any subpart reports
        # a delay, or the actual departure differs from the requested time
        if self.status != 'VOLGENS-PLAN':
            return True
        for subpart in self.trip_parts:
            if subpart.has_delay:
                if subpart == self.trip_parts[-1]:
                    # Is last part of the trip, check if it is only the arrival
                    return subpart.has_departure_delay(arrival_check)
                return True
        if self.requested_time != self.departure_time_actual:
            return True
        return False
    def has_departure_delay(self, subpartcheck=True):
        """
        Deprecated
        """
        if self.status != 'VOLGENS-PLAN':
            return True
        if subpartcheck and self.trip_parts[0].has_delay:
            return True
        if self.requested_time != self.departure_time_actual:
            return True
        return False
    def __getstate__(self):
        # Timestamps become ISO-8601 strings; nested models become JSON strings
        result = super(Trip, self).__getstate__()
        result['requested_time'] = result['requested_time'].isoformat()
        result['departure_time_actual'] = result['departure_time_actual'].isoformat()
        result['arrival_time_actual'] = result['arrival_time_actual'].isoformat()
        result['departure_time_planned'] = result['departure_time_planned'].isoformat()
        result['arrival_time_planned'] = result['arrival_time_planned'].isoformat()
        trip_parts = []
        for trip_part in result['trip_parts']:
            trip_parts.append(trip_part.to_json())
        result['trip_parts'] = trip_parts
        trip_remarks = []
        for trip_remark in result['trip_remarks']:
            trip_remarks.append(trip_remark.to_json())
        result['trip_remarks'] = trip_remarks
        return result
    def __setstate__(self, source_dict):
        super(Trip, self).__setstate__(source_dict)
        # TripSubpart deserialisation
        trip_parts = []
        subparts = self.trip_parts
        for part in subparts:
            subpart = TripSubpart()
            subpart.from_json(part)
            trip_parts.append(subpart)
        self.trip_parts = trip_parts
        # TripRemark deserialisation
        trip_remarks = []
        remarks = self.trip_remarks
        for raw_remark in remarks:
            remark = TripRemark()
            remark.from_json(raw_remark)
            trip_remarks.append(remark)
        self.trip_remarks = trip_remarks
        # Datetime stamps
        self.departure_time_planned = load_datetime(self.departure_time_planned, NS_DATETIME)
        self.departure_time_actual = load_datetime(self.departure_time_actual, NS_DATETIME)
        self.arrival_time_planned = load_datetime(self.arrival_time_planned, NS_DATETIME)
        self.arrival_time_actual = load_datetime(self.arrival_time_actual, NS_DATETIME)
        self.requested_time = load_datetime(self.requested_time, NS_DATETIME)
    def delay_text(self):
        """
        If trip has delays, format a natural language summary
        """
        # TODO implement
        pass
    @classmethod
    def get_actual(cls, trip_list, time):
        """
        Look for the train actually leaving at time
        """
        for trip in trip_list:
            if simple_time(trip.departure_time_planned) == time:
                return trip
        return None
    @classmethod
    def get_optimal(cls, trip_list):
        """
        Look for the optimal trip in the list
        """
        for trip in trip_list:
            if trip.is_optimal:
                return trip
        return None
    def __str__(self):
        return u'<Trip> {0} plan: {1} actual: {2} transfers: {3}'.format(self.has_delay, self.departure_time_planned, self.departure_time_actual, self.nr_transfers)
class NSAPI(object):
"""
NS API object
Library to query the official Dutch railways API
"""
def __init__(self, username, apikey):
self.username = username
self.apikey = apikey
def _request(self, method, url, postdata=None, params=None):
headers = {"Accept": "application/xml",
"Content-Type": "application/xml",
"User-Agent": "ns_api"}
if postdata:
postdata = json.dumps(postdata)
r = requests.request(method,
url,
data=postdata,
params=params,
headers=headers,
files=None,
auth=HTTPBasicAuth(self.username, self.apikey))
r.encoding = 'utf-8'
r.raise_for_status()
return r.text
def parse_disruptions(self, xml):
"""
Parse the NS API xml result into Disruption objects
@param xml: raw XML result from the NS API
"""
obj = xmltodict.parse(xml)
disruptions = {}
disruptions['unplanned'] = []
disruptions['planned'] = []
if obj['Storingen']['Ongepland']:
raw_disruptions = obj['Storingen']['Ongepland']['Storing']
if isinstance(raw_disruptions, collections.OrderedDict):
raw_disruptions = [raw_disruptions]
for disruption in raw_disruptions:
newdis = Disruption(disruption)
#print(newdis.__dict__)
disruptions['unplanned'].append(newdis)
if obj['Storingen']['Gepland']:
raw_disruptions = obj['Storingen']['Gepland']['Storing']
if isinstance(raw_disruptions, collections.OrderedDict):
raw_disruptions = [raw_disruptions]
for disruption in raw_disruptions:
newdis = Disruption(disruption)
#print(newdis.__dict__)
disruptions['planned'].append(newdis)
return disruptions
def get_disruptions(self, station=None, actual=True, unplanned=True):
"""
Fetch the current disruptions, or even the planned ones
@param station: station to lookup
@param actual: only actual disruptions, or a
actuele storingen (=ongeplande storingen + actuele werkzaamheden)
geplande werkzaamheden (=geplande werkzaamheden)
actuele storingen voor een gespecificeerd station (=ongeplande storingen + actuele werkzaamheden)
"""
url = "http://webservices.ns.nl/ns-api-storingen?station=${Stationsnaam}&actual=${true or false}&unplanned=${true or false}"
url = "http://webservices.ns.nl/ns-api-storingen?actual=true&unplanned=true"
raw_disruptions = self._request('GET', url)
return self.parse_disruptions(raw_disruptions)
def parse_departures(self, xml):
"""
Parse the NS API xml result into Departure objects
@param xml: raw XML result from the NS API
"""
obj = xmltodict.parse(xml)
departures = []
for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']:
newdep = Departure(departure)
departures.append(newdep)
#print('-- dep --')
#print(newdep.__dict__)
#print(newdep.to_json())
print(newdep.delay)
return departures
def get_departures(self, station):
    """
    Fetch the current departure times from this station

    http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
    @param station: station to lookup
    """
    endpoint = 'http://webservices.ns.nl/ns-api-avt?station={0}'.format(station)
    return self.parse_departures(self._request('GET', endpoint))
def parse_trips(self, xml, requested_time):
    """
    Parse the NS API xml result into Trip objects

    Returns None when the API reports an error or offers no options.
    """
    obj = xmltodict.parse(xml)
    if 'error' in obj:
        print('Error in trips: ' + obj['error']['message'])
        return None
    trips = []
    try:
        for raw_trip in obj['ReisMogelijkheden']['ReisMogelijkheid']:
            trips.append(Trip(raw_trip, requested_time))
    except TypeError:
        # If no options are found, obj['ReisMogelijkheden'] is None
        return None
    return trips
def get_trips(self, timestamp, start, via, destination, departure=True, prev_advices=1, next_advices=1):
    """
    Fetch trip possibilities for these parameters

    http://webservices.ns.nl/ns-api-treinplanner?<parameters>
    fromStation
    toStation
    dateTime: 2012-02-21T15:50
    departure: true for starting at timestamp, false for arriving at timestamp
    previousAdvices
    nextAdvices

    @param timestamp: either 'HH:MM' (today assumed) or 'dd-mm-yyyy HH:MM'
    NOTE(review): the `departure` parameter is accepted but never appended
    to the query string -- confirm whether it should be sent to the API.
    """
    # The API expects Dutch local time; pick the UTC offset for the
    # requested_time object accordingly.
    timezonestring = '+0100'
    if is_dst('Europe/Amsterdam'):
        timezonestring = '+0200'
    url = 'http://webservices.ns.nl/ns-api-treinplanner?'
    url = url + 'fromStation=' + start
    url = url + '&toStation=' + destination
    if via:
        url = url + '&via=' + via
    if len(timestamp) == 5:
        # Format of HH:MM - api needs yyyy-mm-ddThh:mm
        timestamp = time.strftime("%Y-%m-%d") + 'T' + timestamp
        #requested_time = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M")
        # TODO: DST/normal time
        requested_time = load_datetime(timestamp + timezonestring, "%Y-%m-%dT%H:%M%z")
    else:
        #requested_time = datetime.strptime(timestamp, "%d-%m-%Y %H:%M")
        requested_time = load_datetime(timestamp + timezonestring, "%d-%m-%Y %H:%M%z")
        # Convert the user-supplied dd-mm-yyyy HH:MM into the API's ISO form
        timestamp = datetime.strptime(timestamp, "%d-%m-%Y %H:%M").strftime("%Y-%m-%dT%H:%M")
    url = url + '&previousAdvices=' + str(prev_advices)
    url = url + '&nextAdvices=' + str(next_advices)
    url = url + '&dateTime=' + timestamp
    raw_trips = self._request('GET', url)
    return self.parse_trips(raw_trips, requested_time)
def parse_stations(self, xml):
    """
    Parse the NS API xml result into Station objects

    @param xml: raw XML result from the NS API
    @return: list of Station objects
    """
    # FIX: removed leftover debug print of the station count
    obj = xmltodict.parse(xml)
    stations = []
    for station in obj['Stations']['Station']:
        stations.append(Station(station))
    return stations
def get_stations(self):
    """
    Fetch the list of stations
    """
    raw_stations = self._request('GET', 'http://webservices.ns.nl/ns-api-stations-v2')
    return self.parse_stations(raw_stations)
|
aquatix/ns-api
|
ns_api.py
|
is_dst
|
python
|
def is_dst(zonename):
tz = pytz.timezone(zonename)
now = pytz.utc.localize(datetime.utcnow())
return now.astimezone(tz).dst() != timedelta(0)
|
Find out whether it's Daylight Saving Time in this timezone
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L43-L49
| null |
"""
Library to query the official Dutch railways API
"""
from __future__ import print_function
import collections
import json
import time
from datetime import datetime, timedelta
import pytz
import requests
import xmltodict
from future.utils import python_2_unicode_compatible
from pytz.tzinfo import StaticTzInfo
from requests.auth import HTTPBasicAuth
## ns-api library version
__version__ = '2.7.5'
## Date/time helpers
NS_DATETIME = "%Y-%m-%dT%H:%M:%S%z"
def datetime_to_string(timestamp, dt_format='%Y-%m-%d %H:%M:%S'):
    """
    Render *timestamp* as a string using *dt_format* (strftime syntax).
    """
    return format(timestamp, dt_format)
def simple_time(value):
    """
    Format a datetime or timedelta object to a string of format HH:MM
    """
    if not isinstance(value, timedelta):
        return datetime_to_string(value, '%H:%M')
    hours, minutes = str(value).split(':')[:2]
    return hours + ':' + minutes
## Timezone helpers
class OffsetTime(StaticTzInfo):
    """
    A dumb timezone based on offset such as +0530, -0600, etc.
    """
    def __init__(self, offset):
        # offset is a 5-character string: sign, HH, MM (e.g. '+0530').
        hours = int(offset[:3])
        # Re-apply the sign to the minute part so negative offsets work.
        minutes = int(offset[0] + offset[3:])
        # _utcoffset is what pytz's StaticTzInfo uses for localisation.
        self._utcoffset = timedelta(hours=hours, minutes=minutes)
def load_datetime(value, dt_format):
    """
    Create timezone-aware datetime object

    When *dt_format* ends in %z the trailing 5 characters of *value* are
    treated as a UTC offset and applied via OffsetTime; otherwise a naive
    datetime is returned.
    """
    if not dt_format.endswith('%z'):
        return datetime.strptime(value, dt_format)
    dt_format = dt_format[:-2]
    offset = value[-5:]
    value = value[:-5]
    stripped = offset.replace(':', '')
    if stripped != offset:
        # strip : from HHMM if needed (isoformat() adds it between HH and MM)
        offset = '+' + stripped
        value = value[:-1]
    return OffsetTime(offset).localize(datetime.strptime(value, dt_format))
## List helpers
def list_to_json(source_list):
    """
    Serialise all the items in source_list to json
    """
    return [entry.to_json() for entry in source_list]
def list_from_json(source_list_json):
    """
    Deserialise all the items in source_list from json

    Each element must be a JSON string whose 'class_name' field selects the
    model class to rebuild; unknown or untagged items are skipped with a
    warning on stdout.

    @param source_list_json: list of JSON strings, or None
    @return: list of rebuilt model objects
    """
    result = []
    # FIX: use identity comparison with None instead of `== None`
    if source_list_json == [] or source_list_json is None:
        return result
    for list_item in source_list_json:
        item = json.loads(list_item)
        try:
            if item['class_name'] == 'Departure':
                temp = Departure()
            elif item['class_name'] == 'Disruption':
                temp = Disruption()
            elif item['class_name'] == 'Station':
                temp = Station()
            elif item['class_name'] == 'Trip':
                temp = Trip()
            elif item['class_name'] == 'TripRemark':
                temp = TripRemark()
            elif item['class_name'] == 'TripStop':
                temp = TripStop()
            elif item['class_name'] == 'TripSubpart':
                temp = TripSubpart()
            else:
                print('Unrecognised Class ' + item['class_name'] + ', skipping')
                continue
            temp.from_json(list_item)
            result.append(temp)
        except KeyError:
            print('Unrecognised item with no class_name, skipping')
            continue
    return result
def list_diff(list_a, list_b):
    """
    Return the items from list_b that differ from list_a
    """
    return [entry for entry in list_b if entry not in list_a]
def list_same(list_a, list_b):
    """
    Return the items from list_b that are also on list_a
    """
    return [entry for entry in list_b if entry in list_a]
def list_merge(list_a, list_b):
    """
    Merge two lists without duplicating items

    Args:
        list_a: list
        list_b: list
    Returns:
        New list with deduplicated items from list_a and list_b
    """
    merged = []
    for source in (list_a, list_b):
        for entry in source:
            if entry not in merged:
                merged.append(entry)
    return merged
## NS API objects
@python_2_unicode_compatible
class BaseObject(object):
    """
    Base object with useful functions

    Provides JSON (de)serialisation via __getstate__/__setstate__, equality
    by attribute dict, and forces subclasses to implement __str__.
    """
    def __getstate__(self):
        # Copy the attribute dict and tag it with the class name so
        # list_from_json() can later rebuild the correct model class.
        result = self.__dict__.copy()
        result['class_name'] = self.__class__.__name__
        return result

    def to_json(self):
        """
        Create a JSON representation of this model
        """
        #return json.dumps(self.__getstate__())
        return json.dumps(self.__getstate__(), ensure_ascii=False)

    def __setstate__(self, source_dict):
        if not source_dict:
            # Somehow the source is None
            return
        # Drop the serialisation-only marker before adopting the attributes.
        del source_dict['class_name']
        self.__dict__ = source_dict

    def from_json(self, source_json):
        """
        Parse a JSON representation of this model back to, well, the model
        """
        #source_dict = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(source_json)
        source_dict = json.JSONDecoder().decode(source_json)
        self.__setstate__(source_dict)

    def __eq__(self, other):
        # Equality is full attribute-dict equality.
        # NOTE(review): no matching __hash__ is defined, so instances are
        # unhashable on Python 3 -- confirm they are never used as dict keys.
        return self.__dict__ == other.__dict__

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        raise NotImplementedError('subclasses must override __str__()')
class Station(BaseObject):
    """
    Information on a railway station
    """
    def __init__(self, stat_dict=None):
        """Build from a raw API dict; the no-arg form supports deserialisation."""
        if stat_dict is None:
            return
        self.key = stat_dict['Code']
        self.code = stat_dict['Code']
        self.uic_code = stat_dict['UICCode']
        self.stationtype = stat_dict['Type']
        # Three name variants provided by the API, from abbreviated to full.
        self.names = {
            'short': stat_dict['Namen']['Kort'],
            'middle': stat_dict['Namen']['Middel'],
            'long': stat_dict['Namen']['Lang']
        }
        self.country = stat_dict['Land']
        self.lat = stat_dict['Lat']
        self.lon = stat_dict['Lon']
        self.synonyms = []
        try:
            raw_synonyms = stat_dict['Synoniemen']['Synoniem']
            # A single synonym arrives as a plain string instead of a list.
            if isinstance(raw_synonyms, str):
                raw_synonyms = [raw_synonyms]
            for synonym in raw_synonyms:
                self.synonyms.append(synonym)
        except TypeError:
            # 'Synoniemen' is None when the station has no synonyms
            self.synonyms = []

    def __str__(self):
        return u'<Station> {0} {1}'.format(self.code, self.names['long'])
class Disruption(BaseObject):
    """
    Planned and unplanned disruptions of the railroad traffic
    """
    def __init__(self, part_dict=None):
        """Build from a raw API dict; the no-arg form supports deserialisation."""
        if part_dict is None:
            return
        self.key = part_dict['id']
        self.line = part_dict['Traject']
        self.message = part_dict['Bericht']
        try:
            self.reason = part_dict['Reden']
        except KeyError:
            self.reason = None
        try:
            self.cause = part_dict['Oorzaak']
        except KeyError:
            self.cause = None
        try:
            self.delay_text = part_dict['Vertraging']
        except KeyError:
            self.delay_text = None
        # FIX: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
        # only missing or unparsable 'Datum' values should yield timestamp=None.
        try:
            self.timestamp = load_datetime(part_dict['Datum'], NS_DATETIME)
        except (KeyError, TypeError, ValueError):
            self.timestamp = None

    def __getstate__(self):
        result = super(Disruption, self).__getstate__()
        # NOTE(review): crashes when timestamp is None -- confirm callers
        # never serialise a disruption without a parsed 'Datum'.
        result['timestamp'] = result['timestamp'].isoformat()
        return result

    def __setstate__(self, source_dict):
        super(Disruption, self).__setstate__(source_dict)
        self.timestamp = load_datetime(self.timestamp, NS_DATETIME)

    def __str__(self):
        return u'<Disruption> {0}'.format(self.line)
        #return u'<Disruption> {0}'.format(self.key)
class Departure(BaseObject):
    """
    Information on a departing train on a certain station
    """
    def __init__(self, departure_dict=None):
        """Build from a raw API dict; the no-arg form supports deserialisation."""
        if departure_dict is None:
            return
        self.key = departure_dict['RitNummer'] + '_' + departure_dict['VertrekTijd']
        self.trip_number = departure_dict['RitNummer']
        self.departure_time = load_datetime(departure_dict['VertrekTijd'], NS_DATETIME)
        try:
            self.has_delay = True
            self.departure_delay = departure_dict['VertrekVertraging']
            self.departure_delay_text = departure_dict['VertrekVertragingTekst']
        except KeyError:
            self.has_delay = False
        self.departure_platform = departure_dict['VertrekSpoor']
        self.departure_platform_changed = departure_dict['VertrekSpoor']['@wijziging']
        self.destination = departure_dict['EindBestemming']
        try:
            self.route_text = departure_dict['RouteTekst']
        except KeyError:
            self.route_text = None
        # BUG FIX: these fields previously did `self.x = departure_dict = ['Key']`,
        # rebinding departure_dict to a one-element list instead of indexing it.
        self.train_type = departure_dict['TreinSoort']
        self.carrier = departure_dict['Vervoerder']
        try:
            self.journey_tip = departure_dict['ReisTip']
        except KeyError:
            self.journey_tip = None
        try:
            self.remarks = departure_dict['Opmerkingen']
        except KeyError:
            self.remarks = []

    def __getstate__(self):
        result = super(Departure, self).__getstate__()
        result['departure_time'] = result['departure_time'].isoformat()
        return result

    def __setstate__(self, source_dict):
        super(Departure, self).__setstate__(source_dict)
        # BUG FIX: the serialised dict stores the attribute name
        # 'departure_time' (ISO string), not the raw API key 'VertrekTijd';
        # the old code raised KeyError on every deserialisation.
        self.departure_time = load_datetime(self.departure_time, NS_DATETIME)

    @property
    def delay(self):
        """
        Return the delay of the train for this instance
        """
        if self.has_delay:
            return self.departure_delay
        else:
            return None

    def __str__(self):
        return u'<Departure> trip_number: {0} {1} {2}'.format(self.trip_number, self.destination, self.departure_time)
class TripRemark(BaseObject):
    """
    Notes on this route, generally about disruptions
    """
    def __init__(self, part_dict=None):
        """Build from a raw API dict; the no-arg form supports deserialisation."""
        if part_dict is None:
            return
        self.key = part_dict['Id']
        # The API encodes severity as the string 'true'/'false'
        self.is_grave = part_dict['Ernstig'] != 'false'
        self.message = part_dict['Text']

    def __str__(self):
        return u'<TripRemark> {0} {1}'.format(self.is_grave, self.message)
class TripStop(BaseObject):
    """
    Information on a stop on a route (station, time, platform)
    """
    def __init__(self, part_dict=None):
        """Build from a raw API dict; the no-arg form supports deserialisation."""
        if part_dict is None:
            return
        self.name = part_dict['Naam']
        try:
            self.time = load_datetime(part_dict['Tijd'], NS_DATETIME)
            self.key = simple_time(self.time) + '_' + self.name
        except TypeError:
            # In some rare cases part_dict['Tijd'] can be None
            #self.time = datetime(2000, 1, 1, 0, 0, 0)
            self.time = None
            self.key = None
        self.platform_changed = False
        try:
            self.platform = part_dict['Spoor']['#text']
            if part_dict['Spoor']['@wijziging'] == 'true':
                self.platform_changed = True
        except KeyError:
            # No platform information available for this stop
            self.platform = None
        try:
            self.delay = part_dict['VertrekVertraging']
        except KeyError:
            self.delay = None

    def __getstate__(self):
        result = super(TripStop, self).__getstate__()
        # NOTE(review): crashes when self.time is None -- confirm callers
        # never serialise a stop that had no 'Tijd'.
        result['time'] = result['time'].isoformat()
        return result

    def __setstate__(self, source_dict):
        super(TripStop, self).__setstate__(source_dict)
        self.time = load_datetime(self.time, NS_DATETIME)

    def __str__(self):
        return u'<TripStop> {0}'.format(self.name)
class TripSubpart(BaseObject):
    """
    Sub route; each part means a transfer
    """
    def __init__(self, part_dict=None):
        """Build from a raw API dict; the no-arg form supports deserialisation."""
        if part_dict is None:
            return
        self.trip_type = part_dict['@reisSoort']
        self.transporter = part_dict['Vervoerder']
        self.transport_type = part_dict['VervoerType']
        self.journey_id = part_dict['RitNummer']
        # VOLGENS-PLAN, GEANNULEERD (=vervallen trein), GEWIJZIGD (=planaanpassing in de bijsturing op de dag zelf),
        # OVERSTAP-NIET-MOGELIJK, VERTRAAGD, NIEUW (=extra trein)
        self.status = part_dict['Status']
        self.going = True
        self.has_delay = False
        if self.status == 'GEANNULEERD':
            self.going = False
        if self.status == 'GEANNULEERD' or self.status == 'GEWIJZIGD' or self.status == 'VERTRAAGD':
            self.has_delay = True
        try:
            self.disruption_key = part_dict['OngeplandeStoringId']
        except KeyError:
            self.disruption_key = None
        self.stops = []
        raw_stops = part_dict['ReisStop']
        for raw_stop in raw_stops:
            stop = TripStop(raw_stop)
            self.stops.append(stop)

    @property
    def destination(self):
        # Name of the last stop of this subpart
        return self.stops[-1].name

    @property
    def departure_time(self):
        # Time of the first stop of this subpart
        return self.stops[0].time

    def has_departure_delay(self, arrival_check=True):
        # NOTE(review): when arrival_check is False the loop can fall
        # through without an explicit return (implicitly yielding None),
        # and `stop.delay == False` is never True for the None/string
        # values __init__ assigns -- confirm intended semantics.
        if arrival_check==False and self.has_delay:
            # Check whether one or more stops have delay, except last one
            delay_found = False
            for stop in self.stops:
                if stop.delay and stop:
                    delay_found = True
                elif stop.delay == False and stop == self.stops[-1]:
                    # Last stop and it doesn't have a delay
                    return delay_found
        else:
            return self.has_delay

    def __getstate__(self):
        # Serialise nested TripStop objects to JSON strings
        result = super(TripSubpart, self).__getstate__()
        stops = []
        for stop in self.stops:
            stops.append(stop.to_json())
        result['stops'] = stops
        return result

    def __setstate__(self, source_dict):
        super(TripSubpart, self).__setstate__(source_dict)
        # Rebuild TripStop objects from their JSON strings
        trip_stops = []
        for raw_stop in self.stops:
            trip_stop = TripStop()
            trip_stop.from_json(raw_stop)
            trip_stops.append(trip_stop)
        self.stops = trip_stops

    def __str__(self):
        return u'<TripSubpart> [{0}] {1} {2} {3} {4}'.format(self.going, self.journey_id, self.trip_type, self.transport_type, self.status)
class Trip(BaseObject):
    """
    Suggested route for the provided departure/destination combination
    """
    def __init__(self, trip_dict=None, datetime=None):
        """Build from a raw API dict; the no-arg form supports deserialisation.

        NOTE(review): the second parameter shadows the datetime class inside
        this method; it carries the timestamp the user originally requested.
        """
        if trip_dict is None:
            return
        # self.key = ??
        try:
            # VOLGENS-PLAN, GEWIJZIGD, VERTRAAGD, NIEUW, NIET-OPTIMAAL, NIET-MOGELIJK, PLAN-GEWIJZIGD
            self.status = trip_dict['Status']
        except KeyError:
            self.status = None
        self.nr_transfers = trip_dict['AantalOverstappen']
        try:
            self.travel_time_planned = trip_dict['GeplandeReisTijd']
            self.going = True
        except KeyError:
            # Train has been cancelled
            self.travel_time_planned = None
            self.going = False
        if self.status == 'NIET-MOGELIJK':
            # Train has been cancelled
            self.going = False
        self.travel_time_actual = trip_dict['ActueleReisTijd']
        self.is_optimal = True if trip_dict['Optimaal'] == 'true' else False
        dt_format = "%Y-%m-%dT%H:%M:%S%z"
        self.requested_time = datetime
        # NOTE(review): the bare excepts below default any missing or
        # unparsable timestamp to None -- they also mask wider errors.
        try:
            self.departure_time_planned = load_datetime(trip_dict['GeplandeVertrekTijd'], dt_format)
        except:
            self.departure_time_planned = None
        try:
            self.departure_time_actual = load_datetime(trip_dict['ActueleVertrekTijd'], dt_format)
        except:
            self.departure_time_actual = None
        try:
            self.arrival_time_planned = load_datetime(trip_dict['GeplandeAankomstTijd'], dt_format)
        except:
            self.arrival_time_planned = None
        try:
            self.arrival_time_actual = load_datetime(trip_dict['ActueleAankomstTijd'], dt_format)
        except:
            self.arrival_time_actual = None
        self.trip_parts = []
        raw_parts = trip_dict['ReisDeel']
        # xmltodict yields a single OrderedDict when there is only one part
        if isinstance(trip_dict['ReisDeel'], collections.OrderedDict):
            raw_parts = [trip_dict['ReisDeel']]
        for part in raw_parts:
            trip_part = TripSubpart(part)
            self.trip_parts.append(trip_part)
        try:
            raw_remarks = trip_dict['Melding']
            self.trip_remarks = []
            if isinstance(raw_remarks, collections.OrderedDict):
                raw_remarks = [raw_remarks]
            for remark in raw_remarks:
                trip_remark = TripRemark(remark)
                self.trip_remarks.append(trip_remark)
        except KeyError:
            self.trip_remarks = []

    @property
    def departure(self):
        # Name of the very first stop of the trip
        return self.trip_parts[0].stops[0].name

    @property
    def destination(self):
        # Name of the very last stop of the trip
        return self.trip_parts[-1].stops[-1].name

    @property
    def delay(self):
        """
        Return the delay of the train for this instance
        """
        delay = {'departure_time': None, 'departure_delay': None, 'requested_differs': None,
                 'remarks': self.trip_remarks, 'parts': []}
        # NOTE(review): raises TypeError when either departure time is None
        if self.departure_time_actual > self.departure_time_planned:
            delay['departure_delay'] = self.departure_time_actual - self.departure_time_planned
            delay['departure_time'] = self.departure_time_actual
        if self.requested_time != self.departure_time_actual:
            delay['requested_differs'] = self.departure_time_actual
        for part in self.trip_parts:
            if part.has_delay:
                delay['parts'].append(part)
        return delay

    def has_delay(self, arrival_check=True):
        """Return True when any aspect of the trip deviates from the plan."""
        if self.status != 'VOLGENS-PLAN':
            return True
        for subpart in self.trip_parts:
            if subpart.has_delay:
                if subpart == self.trip_parts[-1]:
                    # Is last part of the trip, check if it is only the arrival
                    return subpart.has_departure_delay(arrival_check)
                return True
        if self.requested_time != self.departure_time_actual:
            return True
        return False

    def has_departure_delay(self, subpartcheck=True):
        """
        Deprecated
        """
        if self.status != 'VOLGENS-PLAN':
            return True
        if subpartcheck and self.trip_parts[0].has_delay:
            return True
        if self.requested_time != self.departure_time_actual:
            return True
        return False

    def __getstate__(self):
        # Serialise datetimes to ISO strings and nested objects to JSON
        result = super(Trip, self).__getstate__()
        result['requested_time'] = result['requested_time'].isoformat()
        result['departure_time_actual'] = result['departure_time_actual'].isoformat()
        result['arrival_time_actual'] = result['arrival_time_actual'].isoformat()
        result['departure_time_planned'] = result['departure_time_planned'].isoformat()
        result['arrival_time_planned'] = result['arrival_time_planned'].isoformat()
        trip_parts = []
        for trip_part in result['trip_parts']:
            trip_parts.append(trip_part.to_json())
        result['trip_parts'] = trip_parts
        trip_remarks = []
        for trip_remark in result['trip_remarks']:
            trip_remarks.append(trip_remark.to_json())
        result['trip_remarks'] = trip_remarks
        return result

    def __setstate__(self, source_dict):
        super(Trip, self).__setstate__(source_dict)
        # TripSubpart deserialisation
        trip_parts = []
        subparts = self.trip_parts
        for part in subparts:
            subpart = TripSubpart()
            subpart.from_json(part)
            trip_parts.append(subpart)
        self.trip_parts = trip_parts
        # TripRemark deserialisation
        trip_remarks = []
        remarks = self.trip_remarks
        for raw_remark in remarks:
            remark = TripRemark()
            remark.from_json(raw_remark)
            trip_remarks.append(remark)
        self.trip_remarks = trip_remarks
        # Datetime stamps
        self.departure_time_planned = load_datetime(self.departure_time_planned, NS_DATETIME)
        self.departure_time_actual = load_datetime(self.departure_time_actual, NS_DATETIME)
        self.arrival_time_planned = load_datetime(self.arrival_time_planned, NS_DATETIME)
        self.arrival_time_actual = load_datetime(self.arrival_time_actual, NS_DATETIME)
        self.requested_time = load_datetime(self.requested_time, NS_DATETIME)

    def delay_text(self):
        """
        If trip has delays, format a natural language summary
        """
        # TODO implement
        pass

    @classmethod
    def get_actual(cls, trip_list, time):
        """
        Look for the train actually leaving at time
        """
        for trip in trip_list:
            if simple_time(trip.departure_time_planned) == time:
                return trip
        return None

    @classmethod
    def get_optimal(cls, trip_list):
        """
        Look for the optimal trip in the list
        """
        for trip in trip_list:
            if trip.is_optimal:
                return trip
        return None

    def __str__(self):
        return u'<Trip> {0} plan: {1} actual: {2} transfers: {3}'.format(self.has_delay, self.departure_time_planned, self.departure_time_actual, self.nr_transfers)
class NSAPI(object):
    """
    NS API object

    Library to query the official Dutch railways API
    """
    def __init__(self, username, apikey):
        self.username = username
        self.apikey = apikey

    def _request(self, method, url, postdata=None, params=None):
        """Perform an authenticated HTTP request; return the response body.

        Raises requests.HTTPError on a non-2xx status.
        """
        headers = {"Accept": "application/xml",
                   "Content-Type": "application/xml",
                   "User-Agent": "ns_api"}
        if postdata:
            postdata = json.dumps(postdata)
        r = requests.request(method,
                             url,
                             data=postdata,
                             params=params,
                             headers=headers,
                             files=None,
                             auth=HTTPBasicAuth(self.username, self.apikey))
        r.encoding = 'utf-8'
        r.raise_for_status()
        return r.text

    def parse_disruptions(self, xml):
        """
        Parse the NS API xml result into Disruption objects

        @param xml: raw XML result from the NS API
        @return: dict with 'unplanned' and 'planned' Disruption lists
        """
        obj = xmltodict.parse(xml)
        disruptions = {'unplanned': [], 'planned': []}
        for source, target in (('Ongepland', 'unplanned'), ('Gepland', 'planned')):
            if obj['Storingen'][source]:
                raw_disruptions = obj['Storingen'][source]['Storing']
                # xmltodict yields a single OrderedDict when there is one entry
                if isinstance(raw_disruptions, collections.OrderedDict):
                    raw_disruptions = [raw_disruptions]
                for disruption in raw_disruptions:
                    disruptions[target].append(Disruption(disruption))
        return disruptions

    def get_disruptions(self, station=None, actual=True, unplanned=True):
        """
        Fetch the current disruptions, or even the planned ones

        actuele storingen (=ongeplande storingen + actuele werkzaamheden)
        geplande werkzaamheden (=geplande werkzaamheden)
        actuele storingen voor een gespecificeerd station

        @param station: station to lookup, or None for the whole network
        @param actual: include actual disruptions
        @param unplanned: include unplanned disruptions
        """
        # BUG FIX: the parameters were previously ignored and the URL was
        # hard-coded to actual=true&unplanned=true for all stations.
        url = ('http://webservices.ns.nl/ns-api-storingen?actual={0}&unplanned={1}'
               .format(str(actual).lower(), str(unplanned).lower()))
        if station:
            url = url + '&station=' + station
        raw_disruptions = self._request('GET', url)
        return self.parse_disruptions(raw_disruptions)

    def parse_departures(self, xml):
        """
        Parse the NS API xml result into Departure objects

        @param xml: raw XML result from the NS API
        """
        # FIX: removed leftover debug print statements
        obj = xmltodict.parse(xml)
        departures = []
        for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']:
            departures.append(Departure(departure))
        return departures

    def get_departures(self, station):
        """
        Fetch the current departure times from this station

        http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
        @param station: station to lookup
        """
        url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
        raw_departures = self._request('GET', url)
        return self.parse_departures(raw_departures)

    def parse_trips(self, xml, requested_time):
        """
        Parse the NS API xml result into Trip objects

        Returns None when the API reports an error or offers no options.
        """
        obj = xmltodict.parse(xml)
        if 'error' in obj:
            print('Error in trips: ' + obj['error']['message'])
            return None
        trips = []
        try:
            for trip in obj['ReisMogelijkheden']['ReisMogelijkheid']:
                trips.append(Trip(trip, requested_time))
        except TypeError:
            # If no options are found, obj['ReisMogelijkheden'] is None
            return None
        return trips

    def get_trips(self, timestamp, start, via, destination, departure=True, prev_advices=1, next_advices=1):
        """
        Fetch trip possibilities for these parameters

        http://webservices.ns.nl/ns-api-treinplanner?<parameters>
        fromStation
        toStation
        dateTime: 2012-02-21T15:50
        departure: true for starting at timestamp, false for arriving at timestamp
        previousAdvices
        nextAdvices

        @param timestamp: either 'HH:MM' (today assumed) or 'dd-mm-yyyy HH:MM'
        NOTE(review): the `departure` parameter is accepted but never sent
        to the API -- confirm whether it should be appended to the query.
        """
        # The API expects Dutch local time; pick the UTC offset accordingly
        timezonestring = '+0100'
        if is_dst('Europe/Amsterdam'):
            timezonestring = '+0200'
        url = 'http://webservices.ns.nl/ns-api-treinplanner?'
        url = url + 'fromStation=' + start
        url = url + '&toStation=' + destination
        if via:
            url = url + '&via=' + via
        if len(timestamp) == 5:
            # Format of HH:MM - api needs yyyy-mm-ddThh:mm
            timestamp = time.strftime("%Y-%m-%d") + 'T' + timestamp
            # TODO: DST/normal time
            requested_time = load_datetime(timestamp + timezonestring, "%Y-%m-%dT%H:%M%z")
        else:
            requested_time = load_datetime(timestamp + timezonestring, "%d-%m-%Y %H:%M%z")
            # Convert the user-supplied dd-mm-yyyy HH:MM to the API's ISO form
            timestamp = datetime.strptime(timestamp, "%d-%m-%Y %H:%M").strftime("%Y-%m-%dT%H:%M")
        url = url + '&previousAdvices=' + str(prev_advices)
        url = url + '&nextAdvices=' + str(next_advices)
        url = url + '&dateTime=' + timestamp
        raw_trips = self._request('GET', url)
        return self.parse_trips(raw_trips, requested_time)

    def parse_stations(self, xml):
        """Parse the NS API xml result into Station objects."""
        # FIX: removed leftover debug print of the station count
        obj = xmltodict.parse(xml)
        stations = []
        for station in obj['Stations']['Station']:
            stations.append(Station(station))
        return stations

    def get_stations(self):
        """
        Fetch the list of stations
        """
        url = 'http://webservices.ns.nl/ns-api-stations-v2'
        raw_stations = self._request('GET', url)
        return self.parse_stations(raw_stations)
|
aquatix/ns-api
|
ns_api.py
|
load_datetime
|
python
|
def load_datetime(value, dt_format):
if dt_format.endswith('%z'):
dt_format = dt_format[:-2]
offset = value[-5:]
value = value[:-5]
if offset != offset.replace(':', ''):
# strip : from HHMM if needed (isoformat() adds it between HH and MM)
offset = '+' + offset.replace(':', '')
value = value[:-1]
return OffsetTime(offset).localize(datetime.strptime(value, dt_format))
return datetime.strptime(value, dt_format)
|
Create timezone-aware datetime object
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L62-L76
| null |
"""
Library to query the official Dutch railways API
"""
from __future__ import print_function
import collections
import json
import time
from datetime import datetime, timedelta
import pytz
import requests
import xmltodict
from future.utils import python_2_unicode_compatible
from pytz.tzinfo import StaticTzInfo
from requests.auth import HTTPBasicAuth
## ns-api library version
__version__ = '2.7.5'
## Date/time helpers
NS_DATETIME = "%Y-%m-%dT%H:%M:%S%z"
def datetime_to_string(timestamp, dt_format='%Y-%m-%d %H:%M:%S'):
    """
    Render *timestamp* as a string using *dt_format* (strftime syntax).
    """
    return format(timestamp, dt_format)
def simple_time(value):
    """
    Format a datetime or timedelta object to a string of format HH:MM
    """
    if not isinstance(value, timedelta):
        return datetime_to_string(value, '%H:%M')
    hours, minutes = str(value).split(':')[:2]
    return hours + ':' + minutes
## Timezone helpers
def is_dst(zonename):
    """
    Find out whether it's Daylight Saving Time in this timezone

    @param zonename: IANA timezone name, e.g. 'Europe/Amsterdam'
    @return: True when the zone is currently observing DST
    """
    tz = pytz.timezone(zonename)
    now = pytz.utc.localize(datetime.utcnow())
    # dst() returns a non-zero timedelta while DST is in effect
    return now.astimezone(tz).dst() != timedelta(0)
class OffsetTime(StaticTzInfo):
    """
    A dumb timezone based on offset such as +0530, -0600, etc.
    """
    def __init__(self, offset):
        # offset is a 5-character string: sign, HH, MM (e.g. '+0530').
        hours = int(offset[:3])
        # Re-apply the sign to the minute part so negative offsets work.
        minutes = int(offset[0] + offset[3:])
        # _utcoffset is what pytz's StaticTzInfo uses for localisation.
        self._utcoffset = timedelta(hours=hours, minutes=minutes)
## List helpers
def list_to_json(source_list):
    """
    Serialise all the items in source_list to json
    """
    return [entry.to_json() for entry in source_list]
def list_from_json(source_list_json):
    """
    Deserialise all the items in source_list from json

    Each element must be a JSON string whose 'class_name' field selects the
    model class to rebuild; unknown or untagged items are skipped with a
    warning on stdout.

    @param source_list_json: list of JSON strings, or None
    @return: list of rebuilt model objects
    """
    result = []
    # FIX: use identity comparison with None instead of `== None`
    if source_list_json == [] or source_list_json is None:
        return result
    for list_item in source_list_json:
        item = json.loads(list_item)
        try:
            if item['class_name'] == 'Departure':
                temp = Departure()
            elif item['class_name'] == 'Disruption':
                temp = Disruption()
            elif item['class_name'] == 'Station':
                temp = Station()
            elif item['class_name'] == 'Trip':
                temp = Trip()
            elif item['class_name'] == 'TripRemark':
                temp = TripRemark()
            elif item['class_name'] == 'TripStop':
                temp = TripStop()
            elif item['class_name'] == 'TripSubpart':
                temp = TripSubpart()
            else:
                print('Unrecognised Class ' + item['class_name'] + ', skipping')
                continue
            temp.from_json(list_item)
            result.append(temp)
        except KeyError:
            print('Unrecognised item with no class_name, skipping')
            continue
    return result
def list_diff(list_a, list_b):
    """
    Return the items from list_b that differ from list_a
    """
    return [entry for entry in list_b if entry not in list_a]
def list_same(list_a, list_b):
    """
    Return the items from list_b that are also on list_a
    """
    return [entry for entry in list_b if entry in list_a]
def list_merge(list_a, list_b):
    """
    Merge two lists without duplicating items

    Args:
        list_a: list
        list_b: list
    Returns:
        New list with deduplicated items from list_a and list_b
    """
    merged = []
    for source in (list_a, list_b):
        for entry in source:
            if entry not in merged:
                merged.append(entry)
    return merged
## NS API objects
@python_2_unicode_compatible
class BaseObject(object):
    """
    Base object with useful functions

    Provides JSON (de)serialisation via __getstate__/__setstate__, equality
    by attribute dict, and forces subclasses to implement __str__.
    """
    def __getstate__(self):
        # Copy the attribute dict and tag it with the class name so
        # list_from_json() can later rebuild the correct model class.
        result = self.__dict__.copy()
        result['class_name'] = self.__class__.__name__
        return result

    def to_json(self):
        """
        Create a JSON representation of this model
        """
        #return json.dumps(self.__getstate__())
        return json.dumps(self.__getstate__(), ensure_ascii=False)

    def __setstate__(self, source_dict):
        if not source_dict:
            # Somehow the source is None
            return
        # Drop the serialisation-only marker before adopting the attributes.
        del source_dict['class_name']
        self.__dict__ = source_dict

    def from_json(self, source_json):
        """
        Parse a JSON representation of this model back to, well, the model
        """
        #source_dict = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(source_json)
        source_dict = json.JSONDecoder().decode(source_json)
        self.__setstate__(source_dict)

    def __eq__(self, other):
        # Equality is full attribute-dict equality.
        # NOTE(review): no matching __hash__ is defined, so instances are
        # unhashable on Python 3 -- confirm they are never used as dict keys.
        return self.__dict__ == other.__dict__

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        raise NotImplementedError('subclasses must override __str__()')
class Station(BaseObject):
    """
    Information on a railway station
    """
    def __init__(self, stat_dict=None):
        """Build from a raw API dict; the no-arg form supports deserialisation."""
        if stat_dict is None:
            return
        self.key = stat_dict['Code']
        self.code = stat_dict['Code']
        self.uic_code = stat_dict['UICCode']
        self.stationtype = stat_dict['Type']
        # Three name variants provided by the API, from abbreviated to full.
        self.names = {
            'short': stat_dict['Namen']['Kort'],
            'middle': stat_dict['Namen']['Middel'],
            'long': stat_dict['Namen']['Lang']
        }
        self.country = stat_dict['Land']
        self.lat = stat_dict['Lat']
        self.lon = stat_dict['Lon']
        self.synonyms = []
        try:
            raw_synonyms = stat_dict['Synoniemen']['Synoniem']
            # A single synonym arrives as a plain string instead of a list.
            if isinstance(raw_synonyms, str):
                raw_synonyms = [raw_synonyms]
            for synonym in raw_synonyms:
                self.synonyms.append(synonym)
        except TypeError:
            # 'Synoniemen' is None when the station has no synonyms
            self.synonyms = []

    def __str__(self):
        return u'<Station> {0} {1}'.format(self.code, self.names['long'])
class Disruption(BaseObject):
    """
    Planned and unplanned disruptions of the railroad traffic
    """
    def __init__(self, part_dict=None):
        """Build from a raw API dict; the no-arg form supports deserialisation."""
        if part_dict is None:
            return
        self.key = part_dict['id']
        self.line = part_dict['Traject']
        self.message = part_dict['Bericht']
        try:
            self.reason = part_dict['Reden']
        except KeyError:
            self.reason = None
        try:
            self.cause = part_dict['Oorzaak']
        except KeyError:
            self.cause = None
        try:
            self.delay_text = part_dict['Vertraging']
        except KeyError:
            self.delay_text = None
        # FIX: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
        # only missing or unparsable 'Datum' values should yield timestamp=None.
        try:
            self.timestamp = load_datetime(part_dict['Datum'], NS_DATETIME)
        except (KeyError, TypeError, ValueError):
            self.timestamp = None

    def __getstate__(self):
        result = super(Disruption, self).__getstate__()
        # NOTE(review): crashes when timestamp is None -- confirm callers
        # never serialise a disruption without a parsed 'Datum'.
        result['timestamp'] = result['timestamp'].isoformat()
        return result

    def __setstate__(self, source_dict):
        super(Disruption, self).__setstate__(source_dict)
        self.timestamp = load_datetime(self.timestamp, NS_DATETIME)

    def __str__(self):
        return u'<Disruption> {0}'.format(self.line)
        #return u'<Disruption> {0}'.format(self.key)
class Departure(BaseObject):
    """
    Information on a departing train on a certain station
    """
    def __init__(self, departure_dict=None):
        """Build from a raw API dict; the no-arg form supports deserialisation."""
        if departure_dict is None:
            return
        self.key = departure_dict['RitNummer'] + '_' + departure_dict['VertrekTijd']
        self.trip_number = departure_dict['RitNummer']
        self.departure_time = load_datetime(departure_dict['VertrekTijd'], NS_DATETIME)
        try:
            self.has_delay = True
            self.departure_delay = departure_dict['VertrekVertraging']
            self.departure_delay_text = departure_dict['VertrekVertragingTekst']
        except KeyError:
            self.has_delay = False
        self.departure_platform = departure_dict['VertrekSpoor']
        self.departure_platform_changed = departure_dict['VertrekSpoor']['@wijziging']
        self.destination = departure_dict['EindBestemming']
        try:
            self.route_text = departure_dict['RouteTekst']
        except KeyError:
            self.route_text = None
        # BUG FIX: these fields previously did `self.x = departure_dict = ['Key']`,
        # rebinding departure_dict to a one-element list instead of indexing it.
        self.train_type = departure_dict['TreinSoort']
        self.carrier = departure_dict['Vervoerder']
        try:
            self.journey_tip = departure_dict['ReisTip']
        except KeyError:
            self.journey_tip = None
        try:
            self.remarks = departure_dict['Opmerkingen']
        except KeyError:
            self.remarks = []

    def __getstate__(self):
        result = super(Departure, self).__getstate__()
        result['departure_time'] = result['departure_time'].isoformat()
        return result

    def __setstate__(self, source_dict):
        super(Departure, self).__setstate__(source_dict)
        # BUG FIX: the serialised dict stores the attribute name
        # 'departure_time' (ISO string), not the raw API key 'VertrekTijd';
        # the old code raised KeyError on every deserialisation.
        self.departure_time = load_datetime(self.departure_time, NS_DATETIME)

    @property
    def delay(self):
        """
        Return the delay of the train for this instance
        """
        if self.has_delay:
            return self.departure_delay
        else:
            return None

    def __str__(self):
        return u'<Departure> trip_number: {0} {1} {2}'.format(self.trip_number, self.destination, self.departure_time)
class TripRemark(BaseObject):
    """
    Notes on this route, generally about disruptions
    """
    def __init__(self, part_dict=None):
        """Build a TripRemark from one 'Melding' dict (xmltodict output)."""
        if part_dict is None:
            return
        self.key = part_dict['Id']
        # 'Ernstig' is the literal string 'true'/'false' from the XML
        self.is_grave = part_dict['Ernstig'] != 'false'
        self.message = part_dict['Text']

    def __str__(self):
        return u'<TripRemark> {0} {1}'.format(self.is_grave, self.message)
class TripStop(BaseObject):
    """
    Information on a stop on a route (station, time, platform)
    """
    def __init__(self, part_dict=None):
        # part_dict is one 'ReisStop' dict as parsed by xmltodict
        if part_dict is None:
            return
        self.name = part_dict['Naam']
        try:
            self.time = load_datetime(part_dict['Tijd'], NS_DATETIME)
            self.key = simple_time(self.time) + '_' + self.name
        except TypeError:
            # In some rare cases part_dict['Tijd'] can be None
            #self.time = datetime(2000, 1, 1, 0, 0, 0)
            self.time = None
            self.key = None
        self.platform_changed = False
        try:
            self.platform = part_dict['Spoor']['#text']
            if part_dict['Spoor']['@wijziging'] == 'true':
                self.platform_changed = True
        except KeyError:
            # no 'Spoor' (or no '#text') entry: platform unknown
            self.platform = None
        try:
            self.delay = part_dict['VertrekVertraging']
        except KeyError:
            # no delay reported for this stop
            self.delay = None

    def __getstate__(self):
        # Serialise with the stop time flattened to an ISO-8601 string.
        # NOTE(review): raises when self.time is None (isoformat on None) —
        # confirm such stops are never serialised.
        result = super(TripStop, self).__getstate__()
        result['time'] = result['time'].isoformat()
        return result

    def __setstate__(self, source_dict):
        super(TripStop, self).__setstate__(source_dict)
        self.time = load_datetime(self.time, NS_DATETIME)

    def __str__(self):
        return u'<TripStop> {0}'.format(self.name)
class TripSubpart(BaseObject):
    """
    Sub route; each part means a transfer
    """
    def __init__(self, part_dict=None):
        """Build a TripSubpart from one 'ReisDeel' dict (xmltodict output)."""
        if part_dict is None:
            return
        self.trip_type = part_dict['@reisSoort']
        self.transporter = part_dict['Vervoerder']
        self.transport_type = part_dict['VervoerType']
        self.journey_id = part_dict['RitNummer']
        # Status values: VOLGENS-PLAN, GEANNULEERD (=vervallen trein),
        # GEWIJZIGD (=planaanpassing in de bijsturing op de dag zelf),
        # OVERSTAP-NIET-MOGELIJK, VERTRAAGD, NIEUW (=extra trein)
        self.status = part_dict['Status']
        self.going = self.status != 'GEANNULEERD'
        self.has_delay = self.status in ('GEANNULEERD', 'GEWIJZIGD', 'VERTRAAGD')
        # Optional link to the unplanned disruption causing the delay
        self.disruption_key = part_dict.get('OngeplandeStoringId')
        self.stops = []
        for raw_stop in part_dict['ReisStop']:
            self.stops.append(TripStop(raw_stop))

    @property
    def destination(self):
        # Name of the last stop of this subpart
        return self.stops[-1].name

    @property
    def departure_time(self):
        # Departure time at the first stop of this subpart
        return self.stops[0].time

    def has_departure_delay(self, arrival_check=True):
        """
        Whether this subpart suffers a delay.

        With arrival_check=False, a delay that only affects the arrival at
        the final stop is ignored.

        BUGFIX: the previous implementation compared stop.delay (None or a
        delay string) with False — a test that could never succeed — and then
        fell off the end of the loop, implicitly returning None.
        """
        if arrival_check or not self.has_delay:
            return self.has_delay
        # Delayed at any stop except the final (arrival-only) one?
        return any(stop.delay for stop in self.stops[:-1])

    def __getstate__(self):
        """Serialise, with the contained TripStops serialised recursively."""
        result = super(TripSubpart, self).__getstate__()
        stops = []
        for stop in self.stops:
            stops.append(stop.to_json())
        result['stops'] = stops
        return result

    def __setstate__(self, source_dict):
        super(TripSubpart, self).__setstate__(source_dict)
        trip_stops = []
        for raw_stop in self.stops:
            trip_stop = TripStop()
            trip_stop.from_json(raw_stop)
            trip_stops.append(trip_stop)
        self.stops = trip_stops

    def __str__(self):
        return u'<TripSubpart> [{0}] {1} {2} {3} {4}'.format(self.going, self.journey_id, self.trip_type, self.transport_type, self.status)
class Trip(BaseObject):
    """
    Suggested route for the provided departure/destination combination
    """
    def __init__(self, trip_dict=None, datetime=None):
        # NOTE(review): the parameter name `datetime` shadows the datetime
        # module inside this method; it carries the originally requested
        # travel time (see NSAPI.get_trips).
        if trip_dict is None:
            return
        # self.key = ??
        try:
            # VOLGENS-PLAN, GEWIJZIGD, VERTRAAGD, NIEUW, NIET-OPTIMAAL, NIET-MOGELIJK, PLAN-GEWIJZIGD
            self.status = trip_dict['Status']
        except KeyError:
            self.status = None
        self.nr_transfers = trip_dict['AantalOverstappen']
        try:
            self.travel_time_planned = trip_dict['GeplandeReisTijd']
            self.going = True
        except KeyError:
            # Train has been cancelled
            self.travel_time_planned = None
            self.going = False
        if self.status == 'NIET-MOGELIJK':
            # Train has been cancelled
            self.going = False
        self.travel_time_actual = trip_dict['ActueleReisTijd']
        self.is_optimal = True if trip_dict['Optimaal'] == 'true' else False
        dt_format = "%Y-%m-%dT%H:%M:%S%z"
        self.requested_time = datetime
        # Each timestamp may be absent or unparsable; it falls back to None
        try:
            self.departure_time_planned = load_datetime(trip_dict['GeplandeVertrekTijd'], dt_format)
        except:
            self.departure_time_planned = None
        try:
            self.departure_time_actual = load_datetime(trip_dict['ActueleVertrekTijd'], dt_format)
        except:
            self.departure_time_actual = None
        try:
            self.arrival_time_planned = load_datetime(trip_dict['GeplandeAankomstTijd'], dt_format)
        except:
            self.arrival_time_planned = None
        try:
            self.arrival_time_actual = load_datetime(trip_dict['ActueleAankomstTijd'], dt_format)
        except:
            self.arrival_time_actual = None
        self.trip_parts = []
        raw_parts = trip_dict['ReisDeel']
        # A trip with a single subpart is parsed as a dict instead of a list
        if isinstance(trip_dict['ReisDeel'], collections.OrderedDict):
            raw_parts = [trip_dict['ReisDeel']]
        for part in raw_parts:
            trip_part = TripSubpart(part)
            self.trip_parts.append(trip_part)
        try:
            raw_remarks = trip_dict['Melding']
            self.trip_remarks = []
            # A single remark is parsed as a dict instead of a list
            if isinstance(raw_remarks, collections.OrderedDict):
                raw_remarks = [raw_remarks]
            for remark in raw_remarks:
                trip_remark = TripRemark(remark)
                self.trip_remarks.append(trip_remark)
        except KeyError:
            self.trip_remarks = []

    @property
    def departure(self):
        # Name of the very first stop of the trip
        return self.trip_parts[0].stops[0].name

    @property
    def destination(self):
        # Name of the very last stop of the trip
        return self.trip_parts[-1].stops[-1].name

    @property
    def delay(self):
        """
        Return the delay of the train for this instance
        """
        # NOTE(review): assumes departure_time_actual/planned are not None;
        # comparing against None raises TypeError on Python 3 — confirm.
        delay = {'departure_time': None, 'departure_delay': None, 'requested_differs': None,
                 'remarks': self.trip_remarks, 'parts': []}
        if self.departure_time_actual > self.departure_time_planned:
            delay['departure_delay'] = self.departure_time_actual - self.departure_time_planned
            delay['departure_time'] = self.departure_time_actual
        if self.requested_time != self.departure_time_actual:
            delay['requested_differs'] = self.departure_time_actual
        for part in self.trip_parts:
            if part.has_delay:
                delay['parts'].append(part)
        return delay

    def has_delay(self, arrival_check=True):
        # Any deviation from plan, a delayed subpart, or an actual departure
        # differing from the requested time counts as a delay
        if self.status != 'VOLGENS-PLAN':
            return True
        for subpart in self.trip_parts:
            if subpart.has_delay:
                if subpart == self.trip_parts[-1]:
                    # Is last part of the trip, check if it is only the arrival
                    return subpart.has_departure_delay(arrival_check)
                return True
        if self.requested_time != self.departure_time_actual:
            return True
        return False

    def has_departure_delay(self, subpartcheck=True):
        """
        Deprecated
        """
        if self.status != 'VOLGENS-PLAN':
            return True
        if subpartcheck and self.trip_parts[0].has_delay:
            return True
        if self.requested_time != self.departure_time_actual:
            return True
        return False

    def __getstate__(self):
        # Flatten datetimes to ISO strings and serialise nested objects
        result = super(Trip, self).__getstate__()
        result['requested_time'] = result['requested_time'].isoformat()
        result['departure_time_actual'] = result['departure_time_actual'].isoformat()
        result['arrival_time_actual'] = result['arrival_time_actual'].isoformat()
        result['departure_time_planned'] = result['departure_time_planned'].isoformat()
        result['arrival_time_planned'] = result['arrival_time_planned'].isoformat()
        trip_parts = []
        for trip_part in result['trip_parts']:
            trip_parts.append(trip_part.to_json())
        result['trip_parts'] = trip_parts
        trip_remarks = []
        for trip_remark in result['trip_remarks']:
            trip_remarks.append(trip_remark.to_json())
        result['trip_remarks'] = trip_remarks
        return result

    def __setstate__(self, source_dict):
        super(Trip, self).__setstate__(source_dict)
        # TripSubpart deserialisation
        trip_parts = []
        subparts = self.trip_parts
        for part in subparts:
            subpart = TripSubpart()
            subpart.from_json(part)
            trip_parts.append(subpart)
        self.trip_parts = trip_parts
        # TripRemark deserialisation
        trip_remarks = []
        remarks = self.trip_remarks
        for raw_remark in remarks:
            remark = TripRemark()
            remark.from_json(raw_remark)
            trip_remarks.append(remark)
        self.trip_remarks = trip_remarks
        # Datetime stamps
        self.departure_time_planned = load_datetime(self.departure_time_planned, NS_DATETIME)
        self.departure_time_actual = load_datetime(self.departure_time_actual, NS_DATETIME)
        self.arrival_time_planned = load_datetime(self.arrival_time_planned, NS_DATETIME)
        self.arrival_time_actual = load_datetime(self.arrival_time_actual, NS_DATETIME)
        self.requested_time = load_datetime(self.requested_time, NS_DATETIME)

    def delay_text(self):
        """
        If trip has delays, format a natural language summary
        """
        # TODO implement
        pass

    @classmethod
    def get_actual(cls, trip_list, time):
        """
        Look for the train actually leaving at time
        """
        # time is an 'HH:MM' string, compared to the planned departure
        for trip in trip_list:
            if simple_time(trip.departure_time_planned) == time:
                return trip
        return None

    @classmethod
    def get_optimal(cls, trip_list):
        """
        Look for the optimal trip in the list
        """
        for trip in trip_list:
            if trip.is_optimal:
                return trip
        return None

    def __str__(self):
        # NOTE(review): self.has_delay is a bound method here (not called), so
        # it renders as a method repr — likely meant self.has_delay().
        return u'<Trip> {0} plan: {1} actual: {2} transfers: {3}'.format(self.has_delay, self.departure_time_planned, self.departure_time_actual, self.nr_transfers)
class NSAPI(object):
    """
    NS API object

    Library to query the official Dutch railways API
    """
    def __init__(self, username, apikey):
        self.username = username
        self.apikey = apikey

    def _request(self, method, url, postdata=None, params=None):
        """Perform one authenticated HTTP request; return the response body."""
        headers = {"Accept": "application/xml",
                   "Content-Type": "application/xml",
                   "User-Agent": "ns_api"}
        if postdata:
            postdata = json.dumps(postdata)
        r = requests.request(method,
                             url,
                             data=postdata,
                             params=params,
                             headers=headers,
                             files=None,
                             auth=HTTPBasicAuth(self.username, self.apikey))
        r.encoding = 'utf-8'
        r.raise_for_status()
        return r.text

    def parse_disruptions(self, xml):
        """
        Parse the NS API xml result into Disruption objects
        @param xml: raw XML result from the NS API
        """
        obj = xmltodict.parse(xml)
        disruptions = {'unplanned': [], 'planned': []}
        # A single disruption is parsed by xmltodict as a dict, not a list
        if obj['Storingen']['Ongepland']:
            raw_disruptions = obj['Storingen']['Ongepland']['Storing']
            if isinstance(raw_disruptions, collections.OrderedDict):
                raw_disruptions = [raw_disruptions]
            for disruption in raw_disruptions:
                disruptions['unplanned'].append(Disruption(disruption))
        if obj['Storingen']['Gepland']:
            raw_disruptions = obj['Storingen']['Gepland']['Storing']
            if isinstance(raw_disruptions, collections.OrderedDict):
                raw_disruptions = [raw_disruptions]
            for disruption in raw_disruptions:
                disruptions['planned'].append(Disruption(disruption))
        return disruptions

    def get_disruptions(self, station=None, actual=True, unplanned=True):
        """
        Fetch the current disruptions, and optionally the planned ones

        @param station: station to look up; None for the whole network
        @param actual: include current disruptions (unplanned + current works)
        @param unplanned: include planned engineering works

        BUGFIX: the parameters used to be ignored — a hard-coded URL with
        actual=true&unplanned=true was always requested. They are now sent
        as proper query parameters.
        """
        params = {
            'actual': 'true' if actual else 'false',
            'unplanned': 'true' if unplanned else 'false',
        }
        if station:
            params['station'] = station
        raw_disruptions = self._request('GET', 'http://webservices.ns.nl/ns-api-storingen', params=params)
        return self.parse_disruptions(raw_disruptions)

    def parse_departures(self, xml):
        """
        Parse the NS API xml result into Departure objects
        @param xml: raw XML result from the NS API
        """
        obj = xmltodict.parse(xml)
        departures = []
        for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']:
            departures.append(Departure(departure))
        # (leftover per-departure debug print removed)
        return departures

    def get_departures(self, station):
        """
        Fetch the current departure times from this station
        http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
        @param station: station to lookup
        """
        url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
        raw_departures = self._request('GET', url)
        return self.parse_departures(raw_departures)

    def parse_trips(self, xml, requested_time):
        """
        Parse the NS API xml result into Trip objects
        """
        obj = xmltodict.parse(xml)
        trips = []
        if 'error' in obj:
            print('Error in trips: ' + obj['error']['message'])
            return None
        try:
            for trip in obj['ReisMogelijkheden']['ReisMogelijkheid']:
                trips.append(Trip(trip, requested_time))
        except TypeError:
            # If no options are found, obj['ReisMogelijkheden'] is None
            return None
        return trips

    def get_trips(self, timestamp, start, via, destination, departure=True, prev_advices=1, next_advices=1):
        """
        Fetch trip possibilities for these parameters
        http://webservices.ns.nl/ns-api-treinplanner?<parameters>
        fromStation
        toStation
        dateTime: 2012-02-21T15:50
        departure: true for starting at timestamp, false for arriving at timestamp
        previousAdvices
        nextAdvices
        """
        timezonestring = '+0100'
        if is_dst('Europe/Amsterdam'):
            timezonestring = '+0200'
        url = 'http://webservices.ns.nl/ns-api-treinplanner?'
        url = url + 'fromStation=' + start
        url = url + '&toStation=' + destination
        if via:
            url = url + '&via=' + via
        if len(timestamp) == 5:
            # Format of HH:MM - api needs yyyy-mm-ddThh:mm
            timestamp = time.strftime("%Y-%m-%d") + 'T' + timestamp
            # TODO: DST/normal time
            requested_time = load_datetime(timestamp + timezonestring, "%Y-%m-%dT%H:%M%z")
        else:
            requested_time = load_datetime(timestamp + timezonestring, "%d-%m-%Y %H:%M%z")
            timestamp = datetime.strptime(timestamp, "%d-%m-%Y %H:%M").strftime("%Y-%m-%dT%H:%M")
        url = url + '&previousAdvices=' + str(prev_advices)
        url = url + '&nextAdvices=' + str(next_advices)
        url = url + '&dateTime=' + timestamp
        # BUGFIX: the documented `departure` parameter used to be ignored;
        # pass it through explicitly (defaults to the API default, true)
        url = url + '&departure=' + ('true' if departure else 'false')
        raw_trips = self._request('GET', url)
        return self.parse_trips(raw_trips, requested_time)

    def parse_stations(self, xml):
        """Parse the NS API xml result into Station objects."""
        obj = xmltodict.parse(xml)
        stations = []
        for station in obj['Stations']['Station']:
            stations.append(Station(station))
        # (leftover debug print of the station count removed)
        return stations

    def get_stations(self):
        """
        Fetch the list of stations
        """
        url = 'http://webservices.ns.nl/ns-api-stations-v2'
        raw_stations = self._request('GET', url)
        return self.parse_stations(raw_stations)
|
aquatix/ns-api
|
ns_api.py
|
list_to_json
|
python
|
def list_to_json(source_list):
    """Serialise all the items in source_list to json."""
    return [item.to_json() for item in source_list]
|
Serialise all the items in source_list to json
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L81-L88
| null |
"""
Library to query the official Dutch railways API
"""
from __future__ import print_function
import collections
import json
import time
from datetime import datetime, timedelta
import pytz
import requests
import xmltodict
from future.utils import python_2_unicode_compatible
from pytz.tzinfo import StaticTzInfo
from requests.auth import HTTPBasicAuth
## ns-api library version
__version__ = '2.7.5'
## Date/time helpers
NS_DATETIME = "%Y-%m-%dT%H:%M:%S%z"
def datetime_to_string(timestamp, dt_format='%Y-%m-%d %H:%M:%S'):
    """Render a datetime object as text according to dt_format."""
    # format() delegates to datetime.__format__, i.e. strftime
    return format(timestamp, dt_format)
def simple_time(value):
    """
    Format a datetime or timedelta object to a string of format HH:MM
    """
    if isinstance(value, timedelta):
        # str(timedelta) looks like '[D day[s], ]H:MM:SS'; drop the seconds
        hours, minutes = str(value).split(':')[:2]
        return hours + ':' + minutes
    return value.strftime('%H:%M')
## Timezone helpers
def is_dst(zonename):
    """
    Find out whether it's Daylight Saving Time in this timezone
    """
    zone = pytz.timezone(zonename)
    localized_now = pytz.utc.localize(datetime.utcnow()).astimezone(zone)
    # A non-zero dst() offset means DST is in effect right now
    return localized_now.dst() != timedelta(0)
class OffsetTime(StaticTzInfo):
    """
    A dumb timezone based on offset such as +0530, -0600, etc.
    """
    def __init__(self, offset):
        # offset is a 5-char string: sign, HH, MM (e.g. '+0530')
        hours = int(offset[:3])  # signed hour part: '+05' -> 5
        # reuse the sign character so '-0530' yields -30 minutes as well
        minutes = int(offset[0] + offset[3:])
        # NOTE(review): pytz's StaticTzInfo appears to read _utcoffset for a
        # fixed-offset zone (no DST) — confirm against pytz internals.
        self._utcoffset = timedelta(hours=hours, minutes=minutes)
def load_datetime(value, dt_format):
    """
    Create timezone-aware datetime object

    When dt_format ends in %z the UTC offset is split off and parsed
    manually, then reattached via OffsetTime.localize().
    """
    if dt_format.endswith('%z'):
        dt_format = dt_format[:-2]
        offset = value[-5:]
        value = value[:-5]
        if offset != offset.replace(':', ''):
            # strip : from HHMM if needed (isoformat() adds it between HH and MM)
            # NOTE(review): this branch hard-codes a '+' sign, so a negative
            # colon-separated offset ('-HH:MM') would misparse — fine for
            # Dutch times, confirm if reused elsewhere.
            offset = '+' + offset.replace(':', '')
            value = value[:-1]
        return OffsetTime(offset).localize(datetime.strptime(value, dt_format))
    return datetime.strptime(value, dt_format)
## List helpers
def list_from_json(source_list_json):
    """
    Deserialise all the items in source_list from json

    Each serialised item carries a 'class_name' key (added by
    BaseObject.__getstate__) that selects the model class to rebuild.
    Items with an unknown or missing class name are reported and skipped.
    """
    # Dispatch table replacing the old if/elif chain
    class_map = {
        'Departure': Departure,
        'Disruption': Disruption,
        'Station': Station,
        'Trip': Trip,
        'TripRemark': TripRemark,
        'TripStop': TripStop,
        'TripSubpart': TripSubpart,
    }
    result = []
    # `is None` instead of the old `== None` comparison
    if source_list_json is None or source_list_json == []:
        return result
    for list_item in source_list_json:
        item = json.loads(list_item)
        class_name = item.get('class_name')
        if class_name is None:
            print('Unrecognised item with no class_name, skipping')
            continue
        item_class = class_map.get(class_name)
        if item_class is None:
            print('Unrecognised Class ' + class_name + ', skipping')
            continue
        temp = item_class()
        temp.from_json(list_item)
        result.append(temp)
    return result
def list_diff(list_a, list_b):
    """
    Return the items from list_b that differ from list_a
    """
    return [entry for entry in list_b if entry not in list_a]
def list_same(list_a, list_b):
    """
    Return the items from list_b that are also on list_a
    """
    return [entry for entry in list_b if entry in list_a]
def list_merge(list_a, list_b):
    """
    Merge two lists without duplicating items

    Args:
        list_a: list
        list_b: list
    Returns:
        New list with deduplicated items from list_a and list_b
    """
    merged = []
    for item in list_a + list_b:
        if item not in merged:
            merged.append(item)
    return merged
## NS API objects
@python_2_unicode_compatible
class BaseObject(object):
    """
    Base object with useful functions

    Provides JSON (de)serialisation for the ns_api model classes; the
    serialised dict carries a 'class_name' key so list_from_json can
    rebuild the right class.
    """
    def __getstate__(self):
        # Copy so callers can mutate the result without touching the instance
        result = self.__dict__.copy()
        result['class_name'] = self.__class__.__name__
        return result

    def to_json(self):
        """
        Create a JSON representation of this model
        """
        #return json.dumps(self.__getstate__())
        return json.dumps(self.__getstate__(), ensure_ascii=False)

    def __setstate__(self, source_dict):
        if not source_dict:
            # Somehow the source is None
            return
        # Drop the marker key added by __getstate__ before restoring state
        del source_dict['class_name']
        self.__dict__ = source_dict

    def from_json(self, source_json):
        """
        Parse a JSON representation of this model back to, well, the model
        """
        #source_dict = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(source_json)
        source_dict = json.JSONDecoder().decode(source_json)
        self.__setstate__(source_dict)

    def __eq__(self, other):
        # Equality is plain attribute-dict comparison
        return self.__dict__ == other.__dict__

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        raise NotImplementedError('subclasses must override __str__()')
class Station(BaseObject):
    """
    Information on a railway station
    """
    def __init__(self, stat_dict=None):
        """Build a Station from one 'Station' dict as parsed by xmltodict."""
        if stat_dict is None:
            return
        code = stat_dict['Code']
        self.key = code
        self.code = code
        self.uic_code = stat_dict['UICCode']
        self.stationtype = stat_dict['Type']
        names = stat_dict['Namen']
        self.names = {
            'short': names['Kort'],
            'middle': names['Middel'],
            'long': names['Lang'],
        }
        self.country = stat_dict['Land']
        self.lat = stat_dict['Lat']
        self.lon = stat_dict['Lon']
        self.synonyms = []
        try:
            raw_synonyms = stat_dict['Synoniemen']['Synoniem']
        except TypeError:
            # 'Synoniemen' is None when the station has no synonyms
            raw_synonyms = []
        # A single synonym is parsed as a bare string by xmltodict
        if isinstance(raw_synonyms, str):
            raw_synonyms = [raw_synonyms]
        for synonym in raw_synonyms:
            self.synonyms.append(synonym)

    def __str__(self):
        return u'<Station> {0} {1}'.format(self.code, self.names['long'])
class Disruption(BaseObject):
    """
    Planned and unplanned disruptions of the railroad traffic
    """
    def __init__(self, part_dict=None):
        """Build a Disruption from one 'Storing' dict (xmltodict output)."""
        if part_dict is None:
            return
        self.key = part_dict['id']
        self.line = part_dict['Traject']
        self.message = part_dict['Bericht']
        # Optional fields; absent keys become None
        self.reason = part_dict.get('Reden')
        self.cause = part_dict.get('Oorzaak')
        self.delay_text = part_dict.get('Vertraging')
        # Narrowed from a bare `except:` to the exceptions that can actually
        # occur here: missing 'Datum' key or an unparsable timestamp.
        try:
            self.timestamp = load_datetime(part_dict['Datum'], NS_DATETIME)
        except (KeyError, TypeError, ValueError):
            self.timestamp = None

    def __getstate__(self):
        """Serialise, with the timestamp flattened to an ISO-8601 string."""
        result = super(Disruption, self).__getstate__()
        # Guard: timestamp may legitimately be None (see __init__)
        if result['timestamp'] is not None:
            result['timestamp'] = result['timestamp'].isoformat()
        return result

    def __setstate__(self, source_dict):
        super(Disruption, self).__setstate__(source_dict)
        if self.timestamp is not None:
            self.timestamp = load_datetime(self.timestamp, NS_DATETIME)

    def __str__(self):
        return u'<Disruption> {0}'.format(self.line)
        #return u'<Disruption> {0}'.format(self.key)
class Departure(BaseObject):
    """
    Information on a departing train on a certain station
    """
    def __init__(self, departure_dict=None):
        """Build a Departure from one 'VertrekkendeTrein' dict (xmltodict output)."""
        if departure_dict is None:
            return
        # RitNummer and VertrekTijd arrive as strings from xmltodict
        self.key = departure_dict['RitNummer'] + '_' + departure_dict['VertrekTijd']
        self.trip_number = departure_dict['RitNummer']
        self.departure_time = load_datetime(departure_dict['VertrekTijd'], NS_DATETIME)
        try:
            self.has_delay = True
            self.departure_delay = departure_dict['VertrekVertraging']
            self.departure_delay_text = departure_dict['VertrekVertragingTekst']
        except KeyError:
            # no 'VertrekVertraging' key means the train is on time
            self.has_delay = False
        self.departure_platform = departure_dict['VertrekSpoor']
        self.departure_platform_changed = departure_dict['VertrekSpoor']['@wijziging']
        self.destination = departure_dict['EindBestemming']
        self.route_text = departure_dict.get('RouteTekst')
        # BUGFIX: these fields were written as e.g.
        #   self.train_type = departure_dict = ['TreinSoort']
        # a chained assignment of a literal list that also rebound
        # departure_dict; they must be dictionary lookups.
        self.train_type = departure_dict['TreinSoort']
        self.carrier = departure_dict['Vervoerder']
        # Optional fields
        self.journey_tip = departure_dict.get('ReisTip')
        self.remarks = departure_dict.get('Opmerkingen', [])

    def __getstate__(self):
        """Serialise, with departure_time flattened to an ISO-8601 string."""
        result = super(Departure, self).__getstate__()
        result['departure_time'] = result['departure_time'].isoformat()
        return result

    def __setstate__(self, source_dict):
        super(Departure, self).__setstate__(source_dict)
        # BUGFIX: used to read source_dict['VertrekTijd'], but serialised
        # state (see __getstate__) stores the ISO string under
        # 'departure_time'; mirrors Disruption.__setstate__.
        self.departure_time = load_datetime(self.departure_time, NS_DATETIME)

    @property
    def delay(self):
        """
        Return the delay of the train for this instance
        """
        if self.has_delay:
            return self.departure_delay
        return None

    def __str__(self):
        return u'<Departure> trip_number: {0} {1} {2}'.format(self.trip_number, self.destination, self.departure_time)
class TripRemark(BaseObject):
    """
    Notes on this route, generally about disruptions
    """
    def __init__(self, part_dict=None):
        """Build a TripRemark from one 'Melding' dict (xmltodict output)."""
        if part_dict is None:
            return
        self.key = part_dict['Id']
        # 'Ernstig' is the literal string 'true'/'false' from the XML
        self.is_grave = part_dict['Ernstig'] != 'false'
        self.message = part_dict['Text']

    def __str__(self):
        return u'<TripRemark> {0} {1}'.format(self.is_grave, self.message)
class TripStop(BaseObject):
    """
    Information on a stop on a route (station, time, platform)
    """
    def __init__(self, part_dict=None):
        # part_dict is one 'ReisStop' dict as parsed by xmltodict
        if part_dict is None:
            return
        self.name = part_dict['Naam']
        try:
            self.time = load_datetime(part_dict['Tijd'], NS_DATETIME)
            self.key = simple_time(self.time) + '_' + self.name
        except TypeError:
            # In some rare cases part_dict['Tijd'] can be None
            #self.time = datetime(2000, 1, 1, 0, 0, 0)
            self.time = None
            self.key = None
        self.platform_changed = False
        try:
            self.platform = part_dict['Spoor']['#text']
            if part_dict['Spoor']['@wijziging'] == 'true':
                self.platform_changed = True
        except KeyError:
            # no 'Spoor' (or no '#text') entry: platform unknown
            self.platform = None
        try:
            self.delay = part_dict['VertrekVertraging']
        except KeyError:
            # no delay reported for this stop
            self.delay = None

    def __getstate__(self):
        # Serialise with the stop time flattened to an ISO-8601 string.
        # NOTE(review): raises when self.time is None (isoformat on None) —
        # confirm such stops are never serialised.
        result = super(TripStop, self).__getstate__()
        result['time'] = result['time'].isoformat()
        return result

    def __setstate__(self, source_dict):
        super(TripStop, self).__setstate__(source_dict)
        self.time = load_datetime(self.time, NS_DATETIME)

    def __str__(self):
        return u'<TripStop> {0}'.format(self.name)
class TripSubpart(BaseObject):
    """
    Sub route; each part means a transfer
    """
    def __init__(self, part_dict=None):
        """Build a TripSubpart from one 'ReisDeel' dict (xmltodict output)."""
        if part_dict is None:
            return
        self.trip_type = part_dict['@reisSoort']
        self.transporter = part_dict['Vervoerder']
        self.transport_type = part_dict['VervoerType']
        self.journey_id = part_dict['RitNummer']
        # Status values: VOLGENS-PLAN, GEANNULEERD (=vervallen trein),
        # GEWIJZIGD (=planaanpassing in de bijsturing op de dag zelf),
        # OVERSTAP-NIET-MOGELIJK, VERTRAAGD, NIEUW (=extra trein)
        self.status = part_dict['Status']
        self.going = self.status != 'GEANNULEERD'
        self.has_delay = self.status in ('GEANNULEERD', 'GEWIJZIGD', 'VERTRAAGD')
        # Optional link to the unplanned disruption causing the delay
        self.disruption_key = part_dict.get('OngeplandeStoringId')
        self.stops = []
        for raw_stop in part_dict['ReisStop']:
            self.stops.append(TripStop(raw_stop))

    @property
    def destination(self):
        # Name of the last stop of this subpart
        return self.stops[-1].name

    @property
    def departure_time(self):
        # Departure time at the first stop of this subpart
        return self.stops[0].time

    def has_departure_delay(self, arrival_check=True):
        """
        Whether this subpart suffers a delay.

        With arrival_check=False, a delay that only affects the arrival at
        the final stop is ignored.

        BUGFIX: the previous implementation compared stop.delay (None or a
        delay string) with False — a test that could never succeed — and then
        fell off the end of the loop, implicitly returning None.
        """
        if arrival_check or not self.has_delay:
            return self.has_delay
        # Delayed at any stop except the final (arrival-only) one?
        return any(stop.delay for stop in self.stops[:-1])

    def __getstate__(self):
        """Serialise, with the contained TripStops serialised recursively."""
        result = super(TripSubpart, self).__getstate__()
        stops = []
        for stop in self.stops:
            stops.append(stop.to_json())
        result['stops'] = stops
        return result

    def __setstate__(self, source_dict):
        super(TripSubpart, self).__setstate__(source_dict)
        trip_stops = []
        for raw_stop in self.stops:
            trip_stop = TripStop()
            trip_stop.from_json(raw_stop)
            trip_stops.append(trip_stop)
        self.stops = trip_stops

    def __str__(self):
        return u'<TripSubpart> [{0}] {1} {2} {3} {4}'.format(self.going, self.journey_id, self.trip_type, self.transport_type, self.status)
class Trip(BaseObject):
"""
Suggested route for the provided departure/destination combination
"""
def __init__(self, trip_dict=None, datetime=None):
if trip_dict is None:
return
# self.key = ??
try:
# VOLGENS-PLAN, GEWIJZIGD, VERTRAAGD, NIEUW, NIET-OPTIMAAL, NIET-MOGELIJK, PLAN-GEWIJZIGD
self.status = trip_dict['Status']
except KeyError:
self.status = None
self.nr_transfers = trip_dict['AantalOverstappen']
try:
self.travel_time_planned = trip_dict['GeplandeReisTijd']
self.going = True
except KeyError:
# Train has been cancelled
self.travel_time_planned = None
self.going = False
if self.status == 'NIET-MOGELIJK':
# Train has been cancelled
self.going = False
self.travel_time_actual = trip_dict['ActueleReisTijd']
self.is_optimal = True if trip_dict['Optimaal'] == 'true' else False
dt_format = "%Y-%m-%dT%H:%M:%S%z"
self.requested_time = datetime
try:
self.departure_time_planned = load_datetime(trip_dict['GeplandeVertrekTijd'], dt_format)
except:
self.departure_time_planned = None
try:
self.departure_time_actual = load_datetime(trip_dict['ActueleVertrekTijd'], dt_format)
except:
self.departure_time_actual = None
try:
self.arrival_time_planned = load_datetime(trip_dict['GeplandeAankomstTijd'], dt_format)
except:
self.arrival_time_planned = None
try:
self.arrival_time_actual = load_datetime(trip_dict['ActueleAankomstTijd'], dt_format)
except:
self.arrival_time_actual = None
self.trip_parts = []
raw_parts = trip_dict['ReisDeel']
if isinstance(trip_dict['ReisDeel'], collections.OrderedDict):
raw_parts = [trip_dict['ReisDeel']]
for part in raw_parts:
trip_part = TripSubpart(part)
self.trip_parts.append(trip_part)
try:
raw_remarks = trip_dict['Melding']
self.trip_remarks = []
if isinstance(raw_remarks, collections.OrderedDict):
raw_remarks = [raw_remarks]
for remark in raw_remarks:
trip_remark = TripRemark(remark)
self.trip_remarks.append(trip_remark)
except KeyError:
self.trip_remarks = []
@property
def departure(self):
return self.trip_parts[0].stops[0].name
@property
def destination(self):
return self.trip_parts[-1].stops[-1].name
@property
def delay(self):
"""
Return the delay of the train for this instance
"""
delay = {'departure_time': None, 'departure_delay': None, 'requested_differs': None,
'remarks': self.trip_remarks, 'parts': []}
if self.departure_time_actual > self.departure_time_planned:
delay['departure_delay'] = self.departure_time_actual - self.departure_time_planned
delay['departure_time'] = self.departure_time_actual
if self.requested_time != self.departure_time_actual:
delay['requested_differs'] = self.departure_time_actual
for part in self.trip_parts:
if part.has_delay:
delay['parts'].append(part)
return delay
def has_delay(self, arrival_check=True):
if self.status != 'VOLGENS-PLAN':
return True
for subpart in self.trip_parts:
if subpart.has_delay:
if subpart == self.trip_parts[-1]:
# Is last part of the trip, check if it is only the arrival
return subpart.has_departure_delay(arrival_check)
return True
if self.requested_time != self.departure_time_actual:
return True
return False
def has_departure_delay(self, subpartcheck=True):
"""
Deprecated
"""
if self.status != 'VOLGENS-PLAN':
return True
if subpartcheck and self.trip_parts[0].has_delay:
return True
if self.requested_time != self.departure_time_actual:
return True
return False
def __getstate__(self):
result = super(Trip, self).__getstate__()
result['requested_time'] = result['requested_time'].isoformat()
result['departure_time_actual'] = result['departure_time_actual'].isoformat()
result['arrival_time_actual'] = result['arrival_time_actual'].isoformat()
result['departure_time_planned'] = result['departure_time_planned'].isoformat()
result['arrival_time_planned'] = result['arrival_time_planned'].isoformat()
trip_parts = []
for trip_part in result['trip_parts']:
trip_parts.append(trip_part.to_json())
result['trip_parts'] = trip_parts
trip_remarks = []
for trip_remark in result['trip_remarks']:
trip_remarks.append(trip_remark.to_json())
result['trip_remarks'] = trip_remarks
return result
def __setstate__(self, source_dict):
super(Trip, self).__setstate__(source_dict)
# TripSubpart deserialisation
trip_parts = []
subparts = self.trip_parts
for part in subparts:
subpart = TripSubpart()
subpart.from_json(part)
trip_parts.append(subpart)
self.trip_parts = trip_parts
# TripRemark deserialisation
trip_remarks = []
remarks = self.trip_remarks
for raw_remark in remarks:
remark = TripRemark()
remark.from_json(raw_remark)
trip_remarks.append(remark)
self.trip_remarks = trip_remarks
# Datetime stamps
self.departure_time_planned = load_datetime(self.departure_time_planned, NS_DATETIME)
self.departure_time_actual = load_datetime(self.departure_time_actual, NS_DATETIME)
self.arrival_time_planned = load_datetime(self.arrival_time_planned, NS_DATETIME)
self.arrival_time_actual = load_datetime(self.arrival_time_actual, NS_DATETIME)
self.requested_time = load_datetime(self.requested_time, NS_DATETIME)
def delay_text(self):
    """
    If trip has delays, format a natural language summary
    """
    # TODO implement — currently a stub that always returns None.
    pass
@classmethod
def get_actual(cls, trip_list, time):
    """
    Return the trip whose planned departure (formatted HH:MM) equals
    *time*, or None when no trip in *trip_list* matches.
    """
    matches = (candidate for candidate in trip_list
               if simple_time(candidate.departure_time_planned) == time)
    return next(matches, None)
@classmethod
def get_optimal(cls, trip_list):
    """
    Return the first trip the planner flagged as optimal, or None.
    """
    return next((candidate for candidate in trip_list if candidate.is_optimal), None)
def __str__(self):
    # NOTE(review): self.has_delay is a *method* on this class and is not
    # called here, so the formatted string shows the bound-method repr;
    # presumably self.has_delay() was intended — confirm before changing.
    return u'<Trip> {0} plan: {1} actual: {2} transfers: {3}'.format(self.has_delay, self.departure_time_planned, self.departure_time_actual, self.nr_transfers)
class NSAPI(object):
    """
    NS API object

    Library to query the official Dutch railways API
    (webservices.ns.nl). Every call authenticates with HTTP basic auth
    using the account username and API key.
    """
    def __init__(self, username, apikey):
        # Credentials used for HTTP basic auth on every request.
        self.username = username
        self.apikey = apikey

    def _request(self, method, url, postdata=None, params=None):
        """Perform an authenticated HTTP request and return the body text.

        Raises requests.HTTPError on non-2xx responses (raise_for_status).
        """
        headers = {"Accept": "application/xml",
                   "Content-Type": "application/xml",
                   "User-Agent": "ns_api"}
        if postdata:
            postdata = json.dumps(postdata)
        r = requests.request(method,
                             url,
                             data=postdata,
                             params=params,
                             headers=headers,
                             files=None,
                             auth=HTTPBasicAuth(self.username, self.apikey))
        # The API serves XML; force utf-8 decoding of the response body.
        r.encoding = 'utf-8'
        r.raise_for_status()
        return r.text

    def parse_disruptions(self, xml):
        """
        Parse the NS API xml result into Disruption objects

        @param xml: raw XML result from the NS API
        @return: dict with 'planned' and 'unplanned' lists of Disruption
        """
        obj = xmltodict.parse(xml)
        disruptions = {}
        disruptions['unplanned'] = []
        disruptions['planned'] = []
        if obj['Storingen']['Ongepland']:
            raw_disruptions = obj['Storingen']['Ongepland']['Storing']
            # xmltodict yields a single dict (not a list) for one element.
            if isinstance(raw_disruptions, collections.OrderedDict):
                raw_disruptions = [raw_disruptions]
            for disruption in raw_disruptions:
                newdis = Disruption(disruption)
                #print(newdis.__dict__)
                disruptions['unplanned'].append(newdis)
        if obj['Storingen']['Gepland']:
            raw_disruptions = obj['Storingen']['Gepland']['Storing']
            if isinstance(raw_disruptions, collections.OrderedDict):
                raw_disruptions = [raw_disruptions]
            for disruption in raw_disruptions:
                newdis = Disruption(disruption)
                #print(newdis.__dict__)
                disruptions['planned'].append(newdis)
        return disruptions

    def get_disruptions(self, station=None, actual=True, unplanned=True):
        """
        Fetch the current disruptions, or even the planned ones

        @param station: station to lookup
        @param actual: only actual disruptions, or a
        actuele storingen (=ongeplande storingen + actuele werkzaamheden)
        geplande werkzaamheden (=geplande werkzaamheden)
        actuele storingen voor een gespecificeerd station (=ongeplande storingen + actuele werkzaamheden)

        NOTE(review): the station/actual/unplanned parameters are currently
        ignored — the second assignment below always overwrites the
        templated URL with a fixed query string.
        """
        url = "http://webservices.ns.nl/ns-api-storingen?station=${Stationsnaam}&actual=${true or false}&unplanned=${true or false}"
        url = "http://webservices.ns.nl/ns-api-storingen?actual=true&unplanned=true"
        raw_disruptions = self._request('GET', url)
        return self.parse_disruptions(raw_disruptions)

    def parse_departures(self, xml):
        """
        Parse the NS API xml result into Departure objects

        @param xml: raw XML result from the NS API
        @return: list of Departure objects
        """
        obj = xmltodict.parse(xml)
        departures = []
        for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']:
            newdep = Departure(departure)
            departures.append(newdep)
            #print('-- dep --')
            #print(newdep.__dict__)
            #print(newdep.to_json())
            # NOTE(review): leftover debug output, printed for every departure.
            print(newdep.delay)
        return departures

    def get_departures(self, station):
        """
        Fetch the current departure times from this station

        http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}

        @param station: station to lookup
        """
        url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
        raw_departures = self._request('GET', url)
        return self.parse_departures(raw_departures)

    def parse_trips(self, xml, requested_time):
        """
        Parse the NS API xml result into Trip objects

        Returns None when the API reports an error or no options exist.
        """
        obj = xmltodict.parse(xml)
        trips = []
        if 'error' in obj:
            print('Error in trips: ' + obj['error']['message'])
            return None
        try:
            for trip in obj['ReisMogelijkheden']['ReisMogelijkheid']:
                newtrip = Trip(trip, requested_time)
                trips.append(newtrip)
        except TypeError:
            # If no options are found, obj['ReisMogelijkheden'] is None
            return None
        return trips

    def get_trips(self, timestamp, start, via, destination, departure=True, prev_advices=1, next_advices=1):
        """
        Fetch trip possibilities for these parameters

        http://webservices.ns.nl/ns-api-treinplanner?<parameters>
        fromStation
        toStation
        dateTime: 2012-02-21T15:50
        departure: true for starting at timestamp, false for arriving at timestamp
        previousAdvices
        nextAdvices

        *timestamp* is either 'HH:MM' (today is assumed) or
        'dd-mm-YYYY HH:MM'.  NOTE(review): the `departure` flag is accepted
        but never added to the query string — confirm against the API docs.
        """
        timezonestring = '+0100'
        if is_dst('Europe/Amsterdam'):
            timezonestring = '+0200'
        url = 'http://webservices.ns.nl/ns-api-treinplanner?'
        url = url + 'fromStation=' + start
        url = url + '&toStation=' + destination
        if via:
            url = url + '&via=' + via
        if len(timestamp) == 5:
            # Format of HH:MM - api needs yyyy-mm-ddThh:mm
            timestamp = time.strftime("%Y-%m-%d") + 'T' + timestamp
            #requested_time = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M")
            # TODO: DST/normal time
            requested_time = load_datetime(timestamp + timezonestring, "%Y-%m-%dT%H:%M%z")
        else:
            #requested_time = datetime.strptime(timestamp, "%d-%m-%Y %H:%M")
            requested_time = load_datetime(timestamp + timezonestring, "%d-%m-%Y %H:%M%z")
            # Re-format the timestamp into the ISO-ish form the API expects.
            timestamp = datetime.strptime(timestamp, "%d-%m-%Y %H:%M").strftime("%Y-%m-%dT%H:%M")
        url = url + '&previousAdvices=' + str(prev_advices)
        url = url + '&nextAdvices=' + str(next_advices)
        url = url + '&dateTime=' + timestamp
        raw_trips = self._request('GET', url)
        return self.parse_trips(raw_trips, requested_time)

    def parse_stations(self, xml):
        """Parse the stations XML payload into a list of Station objects."""
        obj = xmltodict.parse(xml)
        stations = []
        for station in obj['Stations']['Station']:
            newstat = Station(station)
            stations.append(newstat)
        # NOTE(review): leftover debug output of the station count.
        print(len(stations))
        return stations

    def get_stations(self):
        """
        Fetch the list of stations
        """
        url = 'http://webservices.ns.nl/ns-api-stations-v2'
        raw_stations = self._request('GET', url)
        return self.parse_stations(raw_stations)
|
aquatix/ns-api
|
ns_api.py
|
list_from_json
|
python
|
def list_from_json(source_list_json):
    """
    Deserialise all the items in source_list from json

    Items with an unknown or missing 'class_name' are skipped with a
    message on stdout.
    """
    result = []
    if source_list_json == [] or source_list_json == None:
        return result
    # Dispatch table: serialised class_name -> model class.
    known_classes = {
        'Departure': Departure,
        'Disruption': Disruption,
        'Station': Station,
        'Trip': Trip,
        'TripRemark': TripRemark,
        'TripStop': TripStop,
        'TripSubpart': TripSubpart,
    }
    for list_item in source_list_json:
        item = json.loads(list_item)
        try:
            class_name = item['class_name']
        except KeyError:
            print('Unrecognised item with no class_name, skipping')
            continue
        try:
            model_class = known_classes[class_name]
        except KeyError:
            print('Unrecognised Class ' + class_name + ', skipping')
            continue
        deserialised = model_class()
        deserialised.from_json(list_item)
        result.append(deserialised)
    return result
|
Deserialise all the items in source_list from json
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L91-L123
| null |
"""
Library to query the official Dutch railways API
"""
from __future__ import print_function
import collections
import json
import time
from datetime import datetime, timedelta
import pytz
import requests
import xmltodict
from future.utils import python_2_unicode_compatible
from pytz.tzinfo import StaticTzInfo
from requests.auth import HTTPBasicAuth
## ns-api library version
__version__ = '2.7.5'
## Date/time helpers
NS_DATETIME = "%Y-%m-%dT%H:%M:%S%z"
def datetime_to_string(timestamp, dt_format='%Y-%m-%d %H:%M:%S'):
    """
    Render a datetime object as text using *dt_format* (strftime syntax).
    """
    # format() delegates to datetime.__format__, i.e. strftime.
    return format(timestamp, dt_format)
def simple_time(value):
    """
    Format a datetime or timedelta object to a string of format HH:MM
    """
    if not isinstance(value, timedelta):
        return datetime_to_string(value, '%H:%M')
    # str(timedelta) looks like '[D day[s], ]H:MM:SS'; keep hours+minutes.
    fields = str(value).split(':')
    return '{0}:{1}'.format(fields[0], fields[1])
## Timezone helpers
def is_dst(zonename):
    """
    Find out whether it's Daylight Saving Time in this timezone
    """
    zone = pytz.timezone(zonename)
    localised_now = pytz.utc.localize(datetime.utcnow()).astimezone(zone)
    return localised_now.dst() != timedelta(0)
class OffsetTime(StaticTzInfo):
    """
    A dumb timezone based on offset such as +0530, -0600, etc.
    """
    def __init__(self, offset):
        # offset is '+HHMM' / '-HHMM'; the sign applies to both fields.
        sign, hour_part, minute_part = offset[0], offset[1:3], offset[3:]
        self._utcoffset = timedelta(hours=int(sign + hour_part),
                                    minutes=int(sign + minute_part))
def load_datetime(value, dt_format):
    """
    Create timezone-aware datetime object

    When *dt_format* ends in %z the trailing UTC offset is split off by
    hand and applied via OffsetTime; otherwise a naive datetime results.
    """
    if not dt_format.endswith('%z'):
        return datetime.strptime(value, dt_format)
    dt_format = dt_format[:-2]
    value, offset = value[:-5], value[-5:]
    if ':' in offset:
        # isoformat() writes '+HH:MM'; normalise to '+HHMM' and drop the
        # sign character still stuck to the end of the value.
        offset = '+' + offset.replace(':', '')
        value = value[:-1]
    return OffsetTime(offset).localize(datetime.strptime(value, dt_format))
## List helpers
def list_to_json(source_list):
    """
    Serialise all the items in source_list to json
    """
    return [item.to_json() for item in source_list]
def list_diff(list_a, list_b):
    """
    Return the items from list_b that differ from list_a

    Order (and duplicates) of list_b are preserved.
    """
    return [item for item in list_b if item not in list_a]
def list_same(list_a, list_b):
    """
    Return the items from list_b that are also on list_a

    Order (and duplicates) of list_b are preserved.
    """
    return [item for item in list_b if item in list_a]
def list_merge(list_a, list_b):
    """
    Merge two lists without duplicating items

    Args:
        list_a: list
        list_b: list
    Returns:
        New list with deduplicated items from list_a and list_b,
        keeping first-seen order (list_a items first).
    """
    merged = []
    for source in (list_a, list_b):
        for item in source:
            if item not in merged:
                merged.append(item)
    return merged
## NS API objects
@python_2_unicode_compatible
class BaseObject(object):
    """
    Base object with useful functions

    Supplies dict-based JSON (de)serialisation plus equality/repr helpers
    shared by all NS API model classes.
    """
    def __getstate__(self):
        # Tag the state with the class name so list_from_json can dispatch.
        state = dict(self.__dict__)
        state['class_name'] = type(self).__name__
        return state

    def to_json(self):
        """
        Create a JSON representation of this model
        """
        return json.dumps(self.__getstate__(), ensure_ascii=False)

    def __setstate__(self, source_dict):
        if not source_dict:
            # Somehow the source is None
            return
        del source_dict['class_name']
        self.__dict__ = source_dict

    def from_json(self, source_json):
        """
        Parse a JSON representation of this model back to, well, the model
        """
        self.__setstate__(json.JSONDecoder().decode(source_json))

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        raise NotImplementedError('subclasses must override __str__()')
class Station(BaseObject):
    """
    Information on a railway station
    """
    def __init__(self, stat_dict=None):
        if stat_dict is None:
            return
        self.key = stat_dict['Code']
        self.code = stat_dict['Code']
        self.uic_code = stat_dict['UICCode']
        self.stationtype = stat_dict['Type']
        names = stat_dict['Namen']
        self.names = {
            'short': names['Kort'],
            'middle': names['Middel'],
            'long': names['Lang'],
        }
        self.country = stat_dict['Land']
        self.lat = stat_dict['Lat']
        self.lon = stat_dict['Lon']
        # 'Synoniemen' may be None (TypeError) or hold a single string.
        try:
            raw_synonyms = stat_dict['Synoniemen']['Synoniem']
        except TypeError:
            raw_synonyms = []
        if isinstance(raw_synonyms, str):
            raw_synonyms = [raw_synonyms]
        self.synonyms = list(raw_synonyms)

    def __str__(self):
        return u'<Station> {0} {1}'.format(self.code, self.names['long'])
class Disruption(BaseObject):
    """
    Planned and unplanned disruptions of the railroad traffic

    Optional payload fields ('Reden', 'Oorzaak', 'Vertraging') default to
    None when the API omits them; a missing or unparseable 'Datum' leaves
    timestamp as None.
    """
    def __init__(self, part_dict=None):
        if part_dict is None:
            return
        self.key = part_dict['id']
        self.line = part_dict['Traject']
        self.message = part_dict['Bericht']
        # Optional fields: .get() returns None when absent, matching the
        # previous try/except KeyError blocks.
        self.reason = part_dict.get('Reden')
        self.cause = part_dict.get('Oorzaak')
        self.delay_text = part_dict.get('Vertraging')
        try:
            self.timestamp = load_datetime(part_dict['Datum'], NS_DATETIME)
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # still propagate. Missing/malformed 'Datum' -> no timestamp.
            self.timestamp = None

    def __getstate__(self):
        """Serialise; the datetime is stored as an ISO-8601 string."""
        result = super(Disruption, self).__getstate__()
        result['timestamp'] = result['timestamp'].isoformat()
        return result

    def __setstate__(self, source_dict):
        """Restore state and re-parse the ISO timestamp into a datetime."""
        super(Disruption, self).__setstate__(source_dict)
        self.timestamp = load_datetime(self.timestamp, NS_DATETIME)

    def __str__(self):
        return u'<Disruption> {0}'.format(self.line)
class Departure(BaseObject):
    """
    Information on a departing train on a certain station

    Optional payload fields fall back to None/[] when the API omits them.
    """
    def __init__(self, departure_dict=None):
        if departure_dict is None:
            return
        self.key = departure_dict['RitNummer'] + '_' + departure_dict['VertrekTijd']
        self.trip_number = departure_dict['RitNummer']
        self.departure_time = load_datetime(departure_dict['VertrekTijd'], NS_DATETIME)
        try:
            self.has_delay = True
            self.departure_delay = departure_dict['VertrekVertraging']
            self.departure_delay_text = departure_dict['VertrekVertragingTekst']
        except KeyError:
            # Absent delay fields mean the train is on time.
            self.has_delay = False
        self.departure_platform = departure_dict['VertrekSpoor']
        self.departure_platform_changed = departure_dict['VertrekSpoor']['@wijziging']
        self.destination = departure_dict['EindBestemming']
        try:
            self.route_text = departure_dict['RouteTekst']
        except KeyError:
            self.route_text = None
        # BUG FIX: these four used to read `departure_dict = ['TreinSoort']`
        # etc., which rebound departure_dict to a one-element *list* instead
        # of indexing the payload dict, corrupting every later lookup.
        self.train_type = departure_dict['TreinSoort']
        self.carrier = departure_dict['Vervoerder']
        try:
            self.journey_tip = departure_dict['ReisTip']
        except KeyError:
            self.journey_tip = None
        try:
            self.remarks = departure_dict['Opmerkingen']
        except KeyError:
            self.remarks = []

    def __getstate__(self):
        """Serialise; the datetime is stored as an ISO-8601 string."""
        result = super(Departure, self).__getstate__()
        result['departure_time'] = result['departure_time'].isoformat()
        return result

    def __setstate__(self, source_dict):
        super(Departure, self).__setstate__(source_dict)
        # BUG FIX: restore from the serialised 'departure_time' attribute.
        # The old code looked up the raw API key 'VertrekTijd', which
        # __getstate__ never stores, so round-tripping raised KeyError.
        self.departure_time = load_datetime(self.departure_time, NS_DATETIME)

    @property
    def delay(self):
        """
        Return the delay of the train for this instance
        """
        if self.has_delay:
            return self.departure_delay
        return None

    def __str__(self):
        return u'<Departure> trip_number: {0} {1} {2}'.format(self.trip_number, self.destination, self.departure_time)
class TripRemark(BaseObject):
    """
    Notes on this route, generally about disruptions
    """
    def __init__(self, part_dict=None):
        if part_dict is None:
            return
        self.key = part_dict['Id']
        # The API encodes booleans as the strings 'true'/'false'.
        self.is_grave = part_dict['Ernstig'] != 'false'
        self.message = part_dict['Text']

    def __str__(self):
        return u'<TripRemark> {0} {1}'.format(self.is_grave, self.message)
class TripStop(BaseObject):
    """
    Information on a stop on a route (station, time, platform)
    """
    def __init__(self, part_dict=None):
        if part_dict is None:
            return
        self.name = part_dict['Naam']
        try:
            self.time = load_datetime(part_dict['Tijd'], NS_DATETIME)
            self.key = simple_time(self.time) + '_' + self.name
        except TypeError:
            # In some rare cases part_dict['Tijd'] can be None
            self.time = None
            self.key = None
        self.platform_changed = False
        try:
            spoor = part_dict['Spoor']
            self.platform = spoor['#text']
            if spoor['@wijziging'] == 'true':
                self.platform_changed = True
        except KeyError:
            self.platform = None
        try:
            self.delay = part_dict['VertrekVertraging']
        except KeyError:
            self.delay = None

    def __getstate__(self):
        # Serialise the datetime as an ISO-8601 string.
        state = super(TripStop, self).__getstate__()
        state['time'] = state['time'].isoformat()
        return state

    def __setstate__(self, source_dict):
        super(TripStop, self).__setstate__(source_dict)
        self.time = load_datetime(self.time, NS_DATETIME)

    def __str__(self):
        return u'<TripStop> {0}'.format(self.name)
class TripSubpart(BaseObject):
    """
    Sub route; each part means a transfer
    """
    def __init__(self, part_dict=None):
        if part_dict is None:
            return
        self.trip_type = part_dict['@reisSoort']
        self.transporter = part_dict['Vervoerder']
        self.transport_type = part_dict['VervoerType']
        self.journey_id = part_dict['RitNummer']
        # Status values: VOLGENS-PLAN (on schedule), GEANNULEERD (cancelled
        # train), GEWIJZIGD (plan adjusted on the day itself),
        # OVERSTAP-NIET-MOGELIJK (transfer not possible), VERTRAAGD
        # (delayed), NIEUW (extra train)
        self.status = part_dict['Status']
        self.going = True
        self.has_delay = False
        if self.status == 'GEANNULEERD':
            self.going = False
        if self.status == 'GEANNULEERD' or self.status == 'GEWIJZIGD' or self.status == 'VERTRAAGD':
            self.has_delay = True
        try:
            self.disruption_key = part_dict['OngeplandeStoringId']
        except KeyError:
            self.disruption_key = None
        self.stops = []
        raw_stops = part_dict['ReisStop']
        for raw_stop in raw_stops:
            stop = TripStop(raw_stop)
            self.stops.append(stop)

    @property
    def destination(self):
        # Name of the last stop of this subpart.
        return self.stops[-1].name

    @property
    def departure_time(self):
        # Departure time of the first stop of this subpart.
        return self.stops[0].time

    def has_departure_delay(self, arrival_check=True):
        # NOTE(review): with arrival_check=False and has_delay set, this scan
        # can fall off the end of the loop and implicitly return None (the
        # accumulated flag is only returned from the `stop.delay == False`
        # last-stop branch; a last stop whose delay is None hits neither
        # branch). Callers treating None as falsy mask this — confirm intent.
        if arrival_check==False and self.has_delay:
            # Check whether one or more stops have delay, except last one
            delay_found = False
            for stop in self.stops:
                if stop.delay and stop:
                    delay_found = True
                elif stop.delay == False and stop == self.stops[-1]:
                    # Last stop and it doesn't have a delay
                    return delay_found
        else:
            return self.has_delay

    def __getstate__(self):
        # Serialise nested stops as individual JSON strings.
        result = super(TripSubpart, self).__getstate__()
        stops = []
        for stop in self.stops:
            stops.append(stop.to_json())
        result['stops'] = stops
        return result

    def __setstate__(self, source_dict):
        # Re-inflate the JSON-encoded stops back into TripStop objects.
        super(TripSubpart, self).__setstate__(source_dict)
        trip_stops = []
        for raw_stop in self.stops:
            trip_stop = TripStop()
            trip_stop.from_json(raw_stop)
            trip_stops.append(trip_stop)
        self.stops = trip_stops

    def __str__(self):
        return u'<TripSubpart> [{0}] {1} {2} {3} {4}'.format(self.going, self.journey_id, self.trip_type, self.transport_type, self.status)
class Trip(BaseObject):
    """
    Suggested route for the provided departure/destination combination
    """
    def __init__(self, trip_dict=None, datetime=None):
        # NOTE(review): the `datetime` parameter shadows the imported
        # datetime class inside this method; it carries the user-requested
        # travel time, not the class.
        if trip_dict is None:
            return
        # self.key = ??
        try:
            # VOLGENS-PLAN, GEWIJZIGD, VERTRAAGD, NIEUW, NIET-OPTIMAAL, NIET-MOGELIJK, PLAN-GEWIJZIGD
            self.status = trip_dict['Status']
        except KeyError:
            self.status = None
        self.nr_transfers = trip_dict['AantalOverstappen']
        try:
            self.travel_time_planned = trip_dict['GeplandeReisTijd']
            self.going = True
        except KeyError:
            # Train has been cancelled
            self.travel_time_planned = None
            self.going = False
        if self.status == 'NIET-MOGELIJK':
            # Train has been cancelled
            self.going = False
        self.travel_time_actual = trip_dict['ActueleReisTijd']
        self.is_optimal = True if trip_dict['Optimaal'] == 'true' else False
        dt_format = "%Y-%m-%dT%H:%M:%S%z"
        self.requested_time = datetime
        # Each timestamp below is optional: missing or unparseable values
        # become None.  NOTE(review): the bare `except:` clauses also swallow
        # KeyboardInterrupt/SystemExit — candidates for narrowing.
        try:
            self.departure_time_planned = load_datetime(trip_dict['GeplandeVertrekTijd'], dt_format)
        except:
            self.departure_time_planned = None
        try:
            self.departure_time_actual = load_datetime(trip_dict['ActueleVertrekTijd'], dt_format)
        except:
            self.departure_time_actual = None
        try:
            self.arrival_time_planned = load_datetime(trip_dict['GeplandeAankomstTijd'], dt_format)
        except:
            self.arrival_time_planned = None
        try:
            self.arrival_time_actual = load_datetime(trip_dict['ActueleAankomstTijd'], dt_format)
        except:
            self.arrival_time_actual = None
        self.trip_parts = []
        raw_parts = trip_dict['ReisDeel']
        # xmltodict yields a single dict (not a list) for one element.
        if isinstance(trip_dict['ReisDeel'], collections.OrderedDict):
            raw_parts = [trip_dict['ReisDeel']]
        for part in raw_parts:
            trip_part = TripSubpart(part)
            self.trip_parts.append(trip_part)
        try:
            raw_remarks = trip_dict['Melding']
            self.trip_remarks = []
            if isinstance(raw_remarks, collections.OrderedDict):
                raw_remarks = [raw_remarks]
            for remark in raw_remarks:
                trip_remark = TripRemark(remark)
                self.trip_remarks.append(trip_remark)
        except KeyError:
            self.trip_remarks = []

    @property
    def departure(self):
        # Station name of the very first stop of the trip.
        return self.trip_parts[0].stops[0].name

    @property
    def destination(self):
        # Station name of the very last stop of the trip.
        return self.trip_parts[-1].stops[-1].name

    @property
    def delay(self):
        """
        Return the delay of the train for this instance

        NOTE(review): the `>` comparison assumes both departure timestamps
        parsed successfully (non-None) — confirm callers guard for this.
        """
        delay = {'departure_time': None, 'departure_delay': None, 'requested_differs': None,
                 'remarks': self.trip_remarks, 'parts': []}
        if self.departure_time_actual > self.departure_time_planned:
            delay['departure_delay'] = self.departure_time_actual - self.departure_time_planned
            delay['departure_time'] = self.departure_time_actual
        if self.requested_time != self.departure_time_actual:
            delay['requested_differs'] = self.departure_time_actual
        for part in self.trip_parts:
            if part.has_delay:
                delay['parts'].append(part)
        return delay

    def has_delay(self, arrival_check=True):
        # True when the status deviates from plan, a subpart reports delay
        # (the final subpart is asked only about its departure via
        # has_departure_delay), or the actual departure differs from the
        # requested time.
        if self.status != 'VOLGENS-PLAN':
            return True
        for subpart in self.trip_parts:
            if subpart.has_delay:
                if subpart == self.trip_parts[-1]:
                    # Is last part of the trip, check if it is only the arrival
                    return subpart.has_departure_delay(arrival_check)
                return True
        if self.requested_time != self.departure_time_actual:
            return True
        return False

    def has_departure_delay(self, subpartcheck=True):
        """
        Deprecated
        """
        if self.status != 'VOLGENS-PLAN':
            return True
        if subpartcheck and self.trip_parts[0].has_delay:
            return True
        if self.requested_time != self.departure_time_actual:
            return True
        return False

    def __getstate__(self):
        # Serialise: datetimes -> ISO-8601 strings, nested objects -> JSON
        # strings. Mirrored exactly by __setstate__.
        result = super(Trip, self).__getstate__()
        result['requested_time'] = result['requested_time'].isoformat()
        result['departure_time_actual'] = result['departure_time_actual'].isoformat()
        result['arrival_time_actual'] = result['arrival_time_actual'].isoformat()
        result['departure_time_planned'] = result['departure_time_planned'].isoformat()
        result['arrival_time_planned'] = result['arrival_time_planned'].isoformat()
        trip_parts = []
        for trip_part in result['trip_parts']:
            trip_parts.append(trip_part.to_json())
        result['trip_parts'] = trip_parts
        trip_remarks = []
        for trip_remark in result['trip_remarks']:
            trip_remarks.append(trip_remark.to_json())
        result['trip_remarks'] = trip_remarks
        return result

    def __setstate__(self, source_dict):
        # Restore a trip serialised by __getstate__.
        super(Trip, self).__setstate__(source_dict)
        # TripSubpart deserialisation
        trip_parts = []
        subparts = self.trip_parts
        for part in subparts:
            subpart = TripSubpart()
            subpart.from_json(part)
            trip_parts.append(subpart)
        self.trip_parts = trip_parts
        # TripRemark deserialisation
        trip_remarks = []
        remarks = self.trip_remarks
        for raw_remark in remarks:
            remark = TripRemark()
            remark.from_json(raw_remark)
            trip_remarks.append(remark)
        self.trip_remarks = trip_remarks
        # Datetime stamps
        self.departure_time_planned = load_datetime(self.departure_time_planned, NS_DATETIME)
        self.departure_time_actual = load_datetime(self.departure_time_actual, NS_DATETIME)
        self.arrival_time_planned = load_datetime(self.arrival_time_planned, NS_DATETIME)
        self.arrival_time_actual = load_datetime(self.arrival_time_actual, NS_DATETIME)
        self.requested_time = load_datetime(self.requested_time, NS_DATETIME)

    def delay_text(self):
        """
        If trip has delays, format a natural language summary
        """
        # TODO implement — currently a stub that always returns None.
        pass

    @classmethod
    def get_actual(cls, trip_list, time):
        """
        Look for the train actually leaving at time

        *time* is an 'HH:MM' string compared against the planned departure.
        """
        for trip in trip_list:
            if simple_time(trip.departure_time_planned) == time:
                return trip
        return None

    @classmethod
    def get_optimal(cls, trip_list):
        """
        Look for the optimal trip in the list
        """
        for trip in trip_list:
            if trip.is_optimal:
                return trip
        return None

    def __str__(self):
        # NOTE(review): self.has_delay is a bound method and is not called
        # here, so its repr appears in the output; presumably
        # self.has_delay() was intended — confirm before changing.
        return u'<Trip> {0} plan: {1} actual: {2} transfers: {3}'.format(self.has_delay, self.departure_time_planned, self.departure_time_actual, self.nr_transfers)
class NSAPI(object):
    """
    NS API object

    Library to query the official Dutch railways API
    (webservices.ns.nl). Every call authenticates with HTTP basic auth
    using the account username and API key.
    """
    def __init__(self, username, apikey):
        # Credentials used for HTTP basic auth on every request.
        self.username = username
        self.apikey = apikey

    def _request(self, method, url, postdata=None, params=None):
        """Perform an authenticated HTTP request and return the body text.

        Raises requests.HTTPError on non-2xx responses (raise_for_status).
        """
        headers = {"Accept": "application/xml",
                   "Content-Type": "application/xml",
                   "User-Agent": "ns_api"}
        if postdata:
            postdata = json.dumps(postdata)
        r = requests.request(method,
                             url,
                             data=postdata,
                             params=params,
                             headers=headers,
                             files=None,
                             auth=HTTPBasicAuth(self.username, self.apikey))
        # The API serves XML; force utf-8 decoding of the response body.
        r.encoding = 'utf-8'
        r.raise_for_status()
        return r.text

    def parse_disruptions(self, xml):
        """
        Parse the NS API xml result into Disruption objects

        @param xml: raw XML result from the NS API
        @return: dict with 'planned' and 'unplanned' lists of Disruption
        """
        obj = xmltodict.parse(xml)
        disruptions = {}
        disruptions['unplanned'] = []
        disruptions['planned'] = []
        if obj['Storingen']['Ongepland']:
            raw_disruptions = obj['Storingen']['Ongepland']['Storing']
            # xmltodict yields a single dict (not a list) for one element.
            if isinstance(raw_disruptions, collections.OrderedDict):
                raw_disruptions = [raw_disruptions]
            for disruption in raw_disruptions:
                newdis = Disruption(disruption)
                #print(newdis.__dict__)
                disruptions['unplanned'].append(newdis)
        if obj['Storingen']['Gepland']:
            raw_disruptions = obj['Storingen']['Gepland']['Storing']
            if isinstance(raw_disruptions, collections.OrderedDict):
                raw_disruptions = [raw_disruptions]
            for disruption in raw_disruptions:
                newdis = Disruption(disruption)
                #print(newdis.__dict__)
                disruptions['planned'].append(newdis)
        return disruptions

    def get_disruptions(self, station=None, actual=True, unplanned=True):
        """
        Fetch the current disruptions, or even the planned ones

        @param station: station to lookup
        @param actual: only actual disruptions, or a
        actuele storingen (=ongeplande storingen + actuele werkzaamheden)
        geplande werkzaamheden (=geplande werkzaamheden)
        actuele storingen voor een gespecificeerd station (=ongeplande storingen + actuele werkzaamheden)

        NOTE(review): the station/actual/unplanned parameters are currently
        ignored — the second assignment below always overwrites the
        templated URL with a fixed query string.
        """
        url = "http://webservices.ns.nl/ns-api-storingen?station=${Stationsnaam}&actual=${true or false}&unplanned=${true or false}"
        url = "http://webservices.ns.nl/ns-api-storingen?actual=true&unplanned=true"
        raw_disruptions = self._request('GET', url)
        return self.parse_disruptions(raw_disruptions)

    def parse_departures(self, xml):
        """
        Parse the NS API xml result into Departure objects

        @param xml: raw XML result from the NS API
        @return: list of Departure objects
        """
        obj = xmltodict.parse(xml)
        departures = []
        for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']:
            newdep = Departure(departure)
            departures.append(newdep)
            #print('-- dep --')
            #print(newdep.__dict__)
            #print(newdep.to_json())
            # NOTE(review): leftover debug output, printed for every departure.
            print(newdep.delay)
        return departures

    def get_departures(self, station):
        """
        Fetch the current departure times from this station

        http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}

        @param station: station to lookup
        """
        url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
        raw_departures = self._request('GET', url)
        return self.parse_departures(raw_departures)

    def parse_trips(self, xml, requested_time):
        """
        Parse the NS API xml result into Trip objects

        Returns None when the API reports an error or no options exist.
        """
        obj = xmltodict.parse(xml)
        trips = []
        if 'error' in obj:
            print('Error in trips: ' + obj['error']['message'])
            return None
        try:
            for trip in obj['ReisMogelijkheden']['ReisMogelijkheid']:
                newtrip = Trip(trip, requested_time)
                trips.append(newtrip)
        except TypeError:
            # If no options are found, obj['ReisMogelijkheden'] is None
            return None
        return trips

    def get_trips(self, timestamp, start, via, destination, departure=True, prev_advices=1, next_advices=1):
        """
        Fetch trip possibilities for these parameters

        http://webservices.ns.nl/ns-api-treinplanner?<parameters>
        fromStation
        toStation
        dateTime: 2012-02-21T15:50
        departure: true for starting at timestamp, false for arriving at timestamp
        previousAdvices
        nextAdvices

        *timestamp* is either 'HH:MM' (today is assumed) or
        'dd-mm-YYYY HH:MM'.  NOTE(review): the `departure` flag is accepted
        but never added to the query string — confirm against the API docs.
        """
        timezonestring = '+0100'
        if is_dst('Europe/Amsterdam'):
            timezonestring = '+0200'
        url = 'http://webservices.ns.nl/ns-api-treinplanner?'
        url = url + 'fromStation=' + start
        url = url + '&toStation=' + destination
        if via:
            url = url + '&via=' + via
        if len(timestamp) == 5:
            # Format of HH:MM - api needs yyyy-mm-ddThh:mm
            timestamp = time.strftime("%Y-%m-%d") + 'T' + timestamp
            #requested_time = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M")
            # TODO: DST/normal time
            requested_time = load_datetime(timestamp + timezonestring, "%Y-%m-%dT%H:%M%z")
        else:
            #requested_time = datetime.strptime(timestamp, "%d-%m-%Y %H:%M")
            requested_time = load_datetime(timestamp + timezonestring, "%d-%m-%Y %H:%M%z")
            # Re-format the timestamp into the ISO-ish form the API expects.
            timestamp = datetime.strptime(timestamp, "%d-%m-%Y %H:%M").strftime("%Y-%m-%dT%H:%M")
        url = url + '&previousAdvices=' + str(prev_advices)
        url = url + '&nextAdvices=' + str(next_advices)
        url = url + '&dateTime=' + timestamp
        raw_trips = self._request('GET', url)
        return self.parse_trips(raw_trips, requested_time)

    def parse_stations(self, xml):
        """Parse the stations XML payload into a list of Station objects."""
        obj = xmltodict.parse(xml)
        stations = []
        for station in obj['Stations']['Station']:
            newstat = Station(station)
            stations.append(newstat)
        # NOTE(review): leftover debug output of the station count.
        print(len(stations))
        return stations

    def get_stations(self):
        """
        Fetch the list of stations
        """
        url = 'http://webservices.ns.nl/ns-api-stations-v2'
        raw_stations = self._request('GET', url)
        return self.parse_stations(raw_stations)
|
aquatix/ns-api
|
ns_api.py
|
list_diff
|
python
|
def list_diff(list_a, list_b):
result = []
for item in list_b:
if not item in list_a:
result.append(item)
return result
|
Return the items from list_b that differ from list_a
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L126-L134
| null |
"""
Library to query the official Dutch railways API
"""
from __future__ import print_function
import collections
import json
import time
from datetime import datetime, timedelta
import pytz
import requests
import xmltodict
from future.utils import python_2_unicode_compatible
from pytz.tzinfo import StaticTzInfo
from requests.auth import HTTPBasicAuth
## ns-api library version
__version__ = '2.7.5'
## Date/time helpers
NS_DATETIME = "%Y-%m-%dT%H:%M:%S%z"
def datetime_to_string(timestamp, dt_format='%Y-%m-%d %H:%M:%S'):
    """
    Render a datetime object as text using *dt_format* (strftime syntax).
    """
    # format() delegates to datetime.__format__, i.e. strftime.
    return format(timestamp, dt_format)
def simple_time(value):
    """
    Format a datetime or timedelta object to a string of format HH:MM
    """
    if not isinstance(value, timedelta):
        return datetime_to_string(value, '%H:%M')
    # str(timedelta) looks like '[D day[s], ]H:MM:SS'; keep hours+minutes.
    fields = str(value).split(':')
    return '{0}:{1}'.format(fields[0], fields[1])
## Timezone helpers
def is_dst(zonename):
    """
    Find out whether it's Daylight Saving Time in this timezone
    """
    zone = pytz.timezone(zonename)
    localised_now = pytz.utc.localize(datetime.utcnow()).astimezone(zone)
    return localised_now.dst() != timedelta(0)
class OffsetTime(StaticTzInfo):
    """
    A dumb timezone based on offset such as +0530, -0600, etc.
    """
    def __init__(self, offset):
        # offset is '+HHMM' / '-HHMM'; the sign applies to both fields.
        sign, hour_part, minute_part = offset[0], offset[1:3], offset[3:]
        self._utcoffset = timedelta(hours=int(sign + hour_part),
                                    minutes=int(sign + minute_part))
def load_datetime(value, dt_format):
    """
    Create timezone-aware datetime object

    When *dt_format* ends in %z the trailing UTC offset is split off by
    hand and applied via OffsetTime; otherwise a naive datetime results.
    """
    if not dt_format.endswith('%z'):
        return datetime.strptime(value, dt_format)
    dt_format = dt_format[:-2]
    value, offset = value[:-5], value[-5:]
    if ':' in offset:
        # isoformat() writes '+HH:MM'; normalise to '+HHMM' and drop the
        # sign character still stuck to the end of the value.
        offset = '+' + offset.replace(':', '')
        value = value[:-1]
    return OffsetTime(offset).localize(datetime.strptime(value, dt_format))
## List helpers
def list_to_json(source_list):
    """
    Serialise all the items in source_list to json
    """
    return [item.to_json() for item in source_list]
def list_from_json(source_list_json):
    """
    Deserialise all the items in source_list from json

    Items with an unknown or missing 'class_name' are skipped with a
    message on stdout.
    """
    result = []
    if source_list_json == [] or source_list_json == None:
        return result
    # Dispatch table: serialised class_name -> model class.
    known_classes = {
        'Departure': Departure,
        'Disruption': Disruption,
        'Station': Station,
        'Trip': Trip,
        'TripRemark': TripRemark,
        'TripStop': TripStop,
        'TripSubpart': TripSubpart,
    }
    for list_item in source_list_json:
        item = json.loads(list_item)
        try:
            class_name = item['class_name']
        except KeyError:
            print('Unrecognised item with no class_name, skipping')
            continue
        try:
            model_class = known_classes[class_name]
        except KeyError:
            print('Unrecognised Class ' + class_name + ', skipping')
            continue
        deserialised = model_class()
        deserialised.from_json(list_item)
        result.append(deserialised)
    return result
def list_same(list_a, list_b):
    """
    Return the items from list_b that are also on list_a

    Order (and duplicates) of list_b are preserved.
    """
    return [item for item in list_b if item in list_a]
def list_merge(list_a, list_b):
    """
    Merge two lists without duplicating items

    Args:
        list_a: list
        list_b: list
    Returns:
        New list with deduplicated items from list_a and list_b,
        keeping first-seen order (list_a items first).
    """
    merged = []
    for source in (list_a, list_b):
        for item in source:
            if item not in merged:
                merged.append(item)
    return merged
## NS API objects
@python_2_unicode_compatible
class BaseObject(object):
    """
    Common (de)serialisation plumbing shared by all NS API model classes.
    """

    def __getstate__(self):
        # Include the concrete class name so list_from_json can rebuild
        # the right model type later.
        state = dict(self.__dict__)
        state['class_name'] = self.__class__.__name__
        return state

    def to_json(self):
        """
        Create a JSON representation of this model
        """
        return json.dumps(self.__getstate__(), ensure_ascii=False)

    def __setstate__(self, source_dict):
        if not source_dict:
            # Somehow the source is None
            return
        del source_dict['class_name']
        self.__dict__ = source_dict

    def from_json(self, source_json):
        """
        Parse a JSON representation of this model back to, well, the model
        """
        self.__setstate__(json.JSONDecoder().decode(source_json))

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        raise NotImplementedError('subclasses must override __str__()')
class Station(BaseObject):
    """
    Information on a railway station
    """
    def __init__(self, stat_dict=None):
        # With no argument, build an empty instance for from_json() to fill
        if stat_dict is None:
            return
        self.key = stat_dict['Code']
        self.code = stat_dict['Code']
        self.uic_code = stat_dict['UICCode']
        self.stationtype = stat_dict['Type']
        names = stat_dict['Namen']
        self.names = {
            'short': names['Kort'],
            'middle': names['Middel'],
            'long': names['Lang'],
        }
        self.country = stat_dict['Land']
        self.lat = stat_dict['Lat']
        self.lon = stat_dict['Lon']
        self.synonyms = []
        try:
            raw_synonyms = stat_dict['Synoniemen']['Synoniem']
            if isinstance(raw_synonyms, str):
                raw_synonyms = [raw_synonyms]
            self.synonyms = [synonym for synonym in raw_synonyms]
        except TypeError:
            # 'Synoniemen' is None (or not subscriptable) when absent
            self.synonyms = []

    def __str__(self):
        return u'<Station> {0} {1}'.format(self.code, self.names['long'])
class Disruption(BaseObject):
    """
    Planned and unplanned disruptions of the railroad traffic
    """

    def __init__(self, part_dict=None):
        """Build a Disruption from one 'Storing' dict parsed from NS XML.

        With no argument, creates an empty instance for from_json() to fill.
        """
        if part_dict is None:
            return
        self.key = part_dict['id']
        self.line = part_dict['Traject']
        self.message = part_dict['Bericht']
        # Optional fields; absent keys become None (same as the previous
        # try/except KeyError chains).
        self.reason = part_dict.get('Reden')
        self.cause = part_dict.get('Oorzaak')
        self.delay_text = part_dict.get('Vertraging')
        try:
            self.timestamp = load_datetime(part_dict['Datum'], NS_DATETIME)
        except (KeyError, TypeError, ValueError):
            # BUG FIX: was a bare `except:`; narrowed to the errors a
            # missing or malformed 'Datum' value can actually raise.
            self.timestamp = None

    def __getstate__(self):
        # Serialise the timestamp to an isoformat string for to_json()
        result = super(Disruption, self).__getstate__()
        result['timestamp'] = result['timestamp'].isoformat()
        return result

    def __setstate__(self, source_dict):
        # Mirror of __getstate__: re-parse the isoformat timestamp
        super(Disruption, self).__setstate__(source_dict)
        self.timestamp = load_datetime(self.timestamp, NS_DATETIME)

    def __str__(self):
        return u'<Disruption> {0}'.format(self.line)
class Departure(BaseObject):
    """
    Information on a departing train on a certain station
    """

    def __init__(self, departure_dict=None):
        """Build a Departure from one 'VertrekkendeTrein' dict (xmltodict).

        With no argument, creates an empty instance for from_json() to fill.
        """
        if departure_dict is None:
            return
        self.key = departure_dict['RitNummer'] + '_' + departure_dict['VertrekTijd']
        self.trip_number = departure_dict['RitNummer']
        self.departure_time = load_datetime(departure_dict['VertrekTijd'], NS_DATETIME)
        try:
            self.has_delay = True
            self.departure_delay = departure_dict['VertrekVertraging']
            self.departure_delay_text = departure_dict['VertrekVertragingTekst']
        except KeyError:
            # Delay keys absent: the train is on time
            self.has_delay = False
        self.departure_platform = departure_dict['VertrekSpoor']
        self.departure_platform_changed = departure_dict['VertrekSpoor']['@wijziging']
        self.destination = departure_dict['EindBestemming']
        try:
            self.route_text = departure_dict['RouteTekst']
        except KeyError:
            self.route_text = None
        # BUG FIX: these four fields used to read
        # `self.x = departure_dict = ['Key']`, which rebound departure_dict
        # to a literal one-element list instead of looking the key up.
        self.train_type = departure_dict['TreinSoort']
        self.carrier = departure_dict['Vervoerder']
        try:
            self.journey_tip = departure_dict['ReisTip']
        except KeyError:
            self.journey_tip = None
        try:
            self.remarks = departure_dict['Opmerkingen']
        except KeyError:
            self.remarks = []

    def __getstate__(self):
        # Serialise the timestamp to an isoformat string for to_json()
        result = super(Departure, self).__getstate__()
        result['departure_time'] = result['departure_time'].isoformat()
        return result

    def __setstate__(self, source_dict):
        super(Departure, self).__setstate__(source_dict)
        # BUG FIX: after __setstate__ the state carries the serialised
        # attribute names, not the raw NS keys, so re-parse the isoformat
        # timestamp from the restored attribute
        # (was: source_dict['VertrekTijd'], which raises KeyError).
        self.departure_time = load_datetime(self.departure_time, NS_DATETIME)

    @property
    def delay(self):
        """
        Return the delay of the train for this instance
        """
        if self.has_delay:
            return self.departure_delay
        return None

    def __str__(self):
        return u'<Departure> trip_number: {0} {1} {2}'.format(self.trip_number, self.destination, self.departure_time)
class TripRemark(BaseObject):
    """
    Notes on this route, generally about disruptions
    """
    def __init__(self, part_dict=None):
        if part_dict is None:
            return
        self.key = part_dict['Id']
        # 'Ernstig' (serious) is a textual boolean in the XML; anything
        # other than the literal string 'false' counts as grave
        self.is_grave = part_dict['Ernstig'] != 'false'
        self.message = part_dict['Text']

    def __str__(self):
        return u'<TripRemark> {0} {1}'.format(self.is_grave, self.message)
class TripStop(BaseObject):
    """
    Information on a stop on a route (station, time, platform)
    """
    def __init__(self, part_dict=None):
        if part_dict is None:
            return
        self.name = part_dict['Naam']
        try:
            self.time = load_datetime(part_dict['Tijd'], NS_DATETIME)
            self.key = simple_time(self.time) + '_' + self.name
        except TypeError:
            # In some rare cases part_dict['Tijd'] can be None
            self.time = None
            self.key = None
        self.platform_changed = False
        try:
            spoor = part_dict['Spoor']
            self.platform = spoor['#text']
            self.platform_changed = spoor['@wijziging'] == 'true'
        except KeyError:
            # No (complete) platform information available
            self.platform = None
        self.delay = part_dict.get('VertrekVertraging', None)

    def __getstate__(self):
        # Serialise the timestamp to an isoformat string for to_json()
        state = super(TripStop, self).__getstate__()
        state['time'] = state['time'].isoformat()
        return state

    def __setstate__(self, source_dict):
        super(TripStop, self).__setstate__(source_dict)
        self.time = load_datetime(self.time, NS_DATETIME)

    def __str__(self):
        return u'<TripStop> {0}'.format(self.name)
class TripSubpart(BaseObject):
    """
    Sub route; each part means a transfer

    Wraps one 'ReisDeel' element of an NS travel advice, as parsed by
    xmltodict.
    """
    def __init__(self, part_dict=None):
        # With no argument, build an empty instance for from_json() to fill
        if part_dict is None:
            return
        self.trip_type = part_dict['@reisSoort']
        self.transporter = part_dict['Vervoerder']
        self.transport_type = part_dict['VervoerType']
        self.journey_id = part_dict['RitNummer']
        # VOLGENS-PLAN, GEANNULEERD (=vervallen trein), GEWIJZIGD (=planaanpassing in de bijsturing op de dag zelf),
        # OVERSTAP-NIET-MOGELIJK, VERTRAAGD, NIEUW (=extra trein)
        self.status = part_dict['Status']
        self.going = True
        self.has_delay = False
        # Cancelled subparts are not going; cancelled/changed/delayed
        # subparts all count as having a delay
        if self.status == 'GEANNULEERD':
            self.going = False
        if self.status == 'GEANNULEERD' or self.status == 'GEWIJZIGD' or self.status == 'VERTRAAGD':
            self.has_delay = True
        try:
            self.disruption_key = part_dict['OngeplandeStoringId']
        except KeyError:
            # No unplanned disruption linked to this subpart
            self.disruption_key = None
        self.stops = []
        raw_stops = part_dict['ReisStop']
        for raw_stop in raw_stops:
            stop = TripStop(raw_stop)
            self.stops.append(stop)

    @property
    def destination(self):
        # Name of the last stop of this subpart
        return self.stops[-1].name

    @property
    def departure_time(self):
        # Time of the first stop of this subpart
        return self.stops[0].time

    def has_departure_delay(self, arrival_check=True):
        # NOTE(review): when arrival_check is False and the subpart has a
        # delay, a value is only returned if some stop hits the elif
        # branch below; otherwise the loop falls through and the method
        # implicitly returns None (falsy). `stop.delay == False` also never
        # matches a delay of None. Confirm this is intended behaviour.
        if arrival_check==False and self.has_delay:
            # Check whether one or more stops have delay, except last one
            delay_found = False
            for stop in self.stops:
                if stop.delay and stop:
                    delay_found = True
                elif stop.delay == False and stop == self.stops[-1]:
                    # Last stop and it doesn't have a delay
                    return delay_found
        else:
            return self.has_delay

    def __getstate__(self):
        # Serialise nested TripStop objects to JSON strings so the whole
        # subpart can be dumped by BaseObject.to_json()
        result = super(TripSubpart, self).__getstate__()
        stops = []
        for stop in self.stops:
            stops.append(stop.to_json())
        result['stops'] = stops
        return result

    def __setstate__(self, source_dict):
        # Mirror of __getstate__: rebuild TripStop objects from their JSON
        super(TripSubpart, self).__setstate__(source_dict)
        trip_stops = []
        for raw_stop in self.stops:
            trip_stop = TripStop()
            trip_stop.from_json(raw_stop)
            trip_stops.append(trip_stop)
        self.stops = trip_stops

    def __str__(self):
        return u'<TripSubpart> [{0}] {1} {2} {3} {4}'.format(self.going, self.journey_id, self.trip_type, self.transport_type, self.status)
class Trip(BaseObject):
    """
    Suggested route for the provided departure/destination combination

    Built from one 'ReisMogelijkheid' element of the NS travel advice.
    """
    def __init__(self, trip_dict=None, datetime=None):
        # NOTE(review): the `datetime` parameter shadows the datetime class
        # imported at module level; it holds the originally requested
        # travel time (see NSAPI.get_trips).
        if trip_dict is None:
            return
        # self.key = ??
        try:
            # VOLGENS-PLAN, GEWIJZIGD, VERTRAAGD, NIEUW, NIET-OPTIMAAL, NIET-MOGELIJK, PLAN-GEWIJZIGD
            self.status = trip_dict['Status']
        except KeyError:
            self.status = None
        self.nr_transfers = trip_dict['AantalOverstappen']
        try:
            self.travel_time_planned = trip_dict['GeplandeReisTijd']
            self.going = True
        except KeyError:
            # Train has been cancelled
            self.travel_time_planned = None
            self.going = False
        if self.status == 'NIET-MOGELIJK':
            # Train has been cancelled
            self.going = False
        self.travel_time_actual = trip_dict['ActueleReisTijd']
        self.is_optimal = True if trip_dict['Optimaal'] == 'true' else False
        dt_format = "%Y-%m-%dT%H:%M:%S%z"
        self.requested_time = datetime
        # Each timestamp may be missing or unparsable; fall back to None
        try:
            self.departure_time_planned = load_datetime(trip_dict['GeplandeVertrekTijd'], dt_format)
        except:
            self.departure_time_planned = None
        try:
            self.departure_time_actual = load_datetime(trip_dict['ActueleVertrekTijd'], dt_format)
        except:
            self.departure_time_actual = None
        try:
            self.arrival_time_planned = load_datetime(trip_dict['GeplandeAankomstTijd'], dt_format)
        except:
            self.arrival_time_planned = None
        try:
            self.arrival_time_actual = load_datetime(trip_dict['ActueleAankomstTijd'], dt_format)
        except:
            self.arrival_time_actual = None
        self.trip_parts = []
        raw_parts = trip_dict['ReisDeel']
        # xmltodict yields a single OrderedDict instead of a list when
        # there is exactly one ReisDeel; normalise to a list
        if isinstance(trip_dict['ReisDeel'], collections.OrderedDict):
            raw_parts = [trip_dict['ReisDeel']]
        for part in raw_parts:
            trip_part = TripSubpart(part)
            self.trip_parts.append(trip_part)
        try:
            raw_remarks = trip_dict['Melding']
            self.trip_remarks = []
            # Same single-element normalisation as for ReisDeel above
            if isinstance(raw_remarks, collections.OrderedDict):
                raw_remarks = [raw_remarks]
            for remark in raw_remarks:
                trip_remark = TripRemark(remark)
                self.trip_remarks.append(trip_remark)
        except KeyError:
            self.trip_remarks = []

    @property
    def departure(self):
        # Name of the very first stop of the whole trip
        return self.trip_parts[0].stops[0].name

    @property
    def destination(self):
        # Name of the very last stop of the whole trip
        return self.trip_parts[-1].stops[-1].name

    @property
    def delay(self):
        """
        Return the delay of the train for this instance
        """
        delay = {'departure_time': None, 'departure_delay': None, 'requested_differs': None,
                 'remarks': self.trip_remarks, 'parts': []}
        if self.departure_time_actual > self.departure_time_planned:
            delay['departure_delay'] = self.departure_time_actual - self.departure_time_planned
            delay['departure_time'] = self.departure_time_actual
        if self.requested_time != self.departure_time_actual:
            delay['requested_differs'] = self.departure_time_actual
        for part in self.trip_parts:
            if part.has_delay:
                delay['parts'].append(part)
        return delay

    def has_delay(self, arrival_check=True):
        # A trip is delayed when its status deviates from plan, any subpart
        # is delayed, or the actual departure differs from the requested time
        if self.status != 'VOLGENS-PLAN':
            return True
        for subpart in self.trip_parts:
            if subpart.has_delay:
                if subpart == self.trip_parts[-1]:
                    # Is last part of the trip, check if it is only the arrival
                    return subpart.has_departure_delay(arrival_check)
                return True
        if self.requested_time != self.departure_time_actual:
            return True
        return False

    def has_departure_delay(self, subpartcheck=True):
        """
        Deprecated
        """
        if self.status != 'VOLGENS-PLAN':
            return True
        if subpartcheck and self.trip_parts[0].has_delay:
            return True
        if self.requested_time != self.departure_time_actual:
            return True
        return False

    def __getstate__(self):
        # Serialise datetimes to isoformat strings and nested objects to
        # JSON so BaseObject.to_json() can dump the whole trip.
        # NOTE(review): this raises AttributeError when any timestamp is
        # None — confirm callers only serialise fully populated trips.
        result = super(Trip, self).__getstate__()
        result['requested_time'] = result['requested_time'].isoformat()
        result['departure_time_actual'] = result['departure_time_actual'].isoformat()
        result['arrival_time_actual'] = result['arrival_time_actual'].isoformat()
        result['departure_time_planned'] = result['departure_time_planned'].isoformat()
        result['arrival_time_planned'] = result['arrival_time_planned'].isoformat()
        trip_parts = []
        for trip_part in result['trip_parts']:
            trip_parts.append(trip_part.to_json())
        result['trip_parts'] = trip_parts
        trip_remarks = []
        for trip_remark in result['trip_remarks']:
            trip_remarks.append(trip_remark.to_json())
        result['trip_remarks'] = trip_remarks
        return result

    def __setstate__(self, source_dict):
        # Mirror of __getstate__: rebuild nested objects and datetimes
        super(Trip, self).__setstate__(source_dict)
        # TripSubpart deserialisation
        trip_parts = []
        subparts = self.trip_parts
        for part in subparts:
            subpart = TripSubpart()
            subpart.from_json(part)
            trip_parts.append(subpart)
        self.trip_parts = trip_parts
        # TripRemark deserialisation
        trip_remarks = []
        remarks = self.trip_remarks
        for raw_remark in remarks:
            remark = TripRemark()
            remark.from_json(raw_remark)
            trip_remarks.append(remark)
        self.trip_remarks = trip_remarks
        # Datetime stamps
        self.departure_time_planned = load_datetime(self.departure_time_planned, NS_DATETIME)
        self.departure_time_actual = load_datetime(self.departure_time_actual, NS_DATETIME)
        self.arrival_time_planned = load_datetime(self.arrival_time_planned, NS_DATETIME)
        self.arrival_time_actual = load_datetime(self.arrival_time_actual, NS_DATETIME)
        self.requested_time = load_datetime(self.requested_time, NS_DATETIME)

    def delay_text(self):
        """
        If trip has delays, format a natural language summary
        """
        # TODO implement
        pass

    @classmethod
    def get_actual(cls, trip_list, time):
        """
        Look for the train actually leaving at time
        """
        for trip in trip_list:
            if simple_time(trip.departure_time_planned) == time:
                return trip
        return None

    @classmethod
    def get_optimal(cls, trip_list):
        """
        Look for the optimal trip in the list
        """
        for trip in trip_list:
            if trip.is_optimal:
                return trip
        return None

    def __str__(self):
        return u'<Trip> {0} plan: {1} actual: {2} transfers: {3}'.format(self.has_delay, self.departure_time_planned, self.departure_time_actual, self.nr_transfers)
class NSAPI(object):
    """
    NS API object

    Library to query the official Dutch railways API. All calls use HTTP
    basic auth with the username/apikey pair given to the constructor.
    """

    def __init__(self, username, apikey):
        self.username = username
        self.apikey = apikey

    def _request(self, method, url, postdata=None, params=None):
        """Perform an authenticated request and return the response body.

        Raises requests.HTTPError for non-2xx responses.
        """
        headers = {"Accept": "application/xml",
                   "Content-Type": "application/xml",
                   "User-Agent": "ns_api"}
        if postdata:
            postdata = json.dumps(postdata)
        r = requests.request(method,
                             url,
                             data=postdata,
                             params=params,
                             headers=headers,
                             files=None,
                             auth=HTTPBasicAuth(self.username, self.apikey))
        # The API serves XML; force utf-8 before reading the text
        r.encoding = 'utf-8'
        r.raise_for_status()
        return r.text

    @staticmethod
    def _parse_disruption_group(group):
        """Parse one 'Ongepland'/'Gepland' group into Disruption objects."""
        if not group:
            return []
        raw_disruptions = group['Storing']
        # xmltodict yields a single OrderedDict when there is exactly one
        # disruption; normalise to a list
        if isinstance(raw_disruptions, collections.OrderedDict):
            raw_disruptions = [raw_disruptions]
        return [Disruption(disruption) for disruption in raw_disruptions]

    def parse_disruptions(self, xml):
        """
        Parse the NS API xml result into Disruption objects

        @param xml: raw XML result from the NS API
        @return: dict with 'unplanned' and 'planned' lists of Disruption
        """
        obj = xmltodict.parse(xml)
        return {
            'unplanned': self._parse_disruption_group(obj['Storingen']['Ongepland']),
            'planned': self._parse_disruption_group(obj['Storingen']['Gepland']),
        }

    def get_disruptions(self, station=None, actual=True, unplanned=True):
        """
        Fetch the current disruptions, or even the planned ones

        @param station: optional station to look up disruptions for
        @param actual: include actuele storingen (=ongeplande storingen + actuele werkzaamheden)
        @param unplanned: include geplande werkzaamheden
        """
        # BUG FIX: the parameters used to be ignored — the URL was
        # hard-coded to actual=true&unplanned=true with no station filter.
        # Defaults keep the old behaviour.
        params = {
            'actual': 'true' if actual else 'false',
            'unplanned': 'true' if unplanned else 'false',
        }
        if station:
            params['station'] = station
        raw_disruptions = self._request('GET', 'http://webservices.ns.nl/ns-api-storingen', params=params)
        return self.parse_disruptions(raw_disruptions)

    def parse_departures(self, xml):
        """
        Parse the NS API xml result into Departure objects

        @param xml: raw XML result from the NS API
        """
        obj = xmltodict.parse(xml)
        # Stray debug print of each departure's delay removed
        return [Departure(departure)
                for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']]

    def get_departures(self, station):
        """
        Fetch the current departure times from this station

        http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}

        @param station: station to lookup
        """
        url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
        raw_departures = self._request('GET', url)
        return self.parse_departures(raw_departures)

    def parse_trips(self, xml, requested_time):
        """
        Parse the NS API xml result into Trip objects

        @param xml: raw XML result from the NS API
        @param requested_time: datetime the advice was requested for,
            stored on each Trip for later delay checks
        @return: list of Trip, or None when the API reports an error or
            no travel options exist
        """
        obj = xmltodict.parse(xml)
        trips = []
        if 'error' in obj:
            print('Error in trips: ' + obj['error']['message'])
            return None
        try:
            for trip in obj['ReisMogelijkheden']['ReisMogelijkheid']:
                newtrip = Trip(trip, requested_time)
                trips.append(newtrip)
        except TypeError:
            # If no options are found, obj['ReisMogelijkheden'] is None
            return None
        return trips

    def get_trips(self, timestamp, start, via, destination, departure=True, prev_advices=1, next_advices=1):
        """
        Fetch trip possibilities for these parameters

        http://webservices.ns.nl/ns-api-treinplanner?<parameters>
        fromStation
        toStation
        dateTime: 2012-02-21T15:50
        departure: true for starting at timestamp, false for arriving at timestamp
        previousAdvices
        nextAdvices

        NOTE(review): the `departure` parameter is accepted but never sent
        to the API — confirm whether it should map onto the `departure`
        query parameter.
        """
        # Pick the UTC offset for the Dutch timezone (CET vs CEST)
        timezonestring = '+0100'
        if is_dst('Europe/Amsterdam'):
            timezonestring = '+0200'
        url = 'http://webservices.ns.nl/ns-api-treinplanner?'
        url = url + 'fromStation=' + start
        url = url + '&toStation=' + destination
        if via:
            url = url + '&via=' + via
        if len(timestamp) == 5:
            # Format of HH:MM - api needs yyyy-mm-ddThh:mm; assume today
            timestamp = time.strftime("%Y-%m-%d") + 'T' + timestamp
            requested_time = load_datetime(timestamp + timezonestring, "%Y-%m-%dT%H:%M%z")
        else:
            # Full dd-mm-yyyy HH:MM timestamp; convert to the API format
            requested_time = load_datetime(timestamp + timezonestring, "%d-%m-%Y %H:%M%z")
            timestamp = datetime.strptime(timestamp, "%d-%m-%Y %H:%M").strftime("%Y-%m-%dT%H:%M")
        url = url + '&previousAdvices=' + str(prev_advices)
        url = url + '&nextAdvices=' + str(next_advices)
        url = url + '&dateTime=' + timestamp
        raw_trips = self._request('GET', url)
        return self.parse_trips(raw_trips, requested_time)

    def parse_stations(self, xml):
        """
        Parse the NS API xml result into Station objects

        @param xml: raw XML result from the NS API
        """
        obj = xmltodict.parse(xml)
        # Stray debug print of the station count removed
        return [Station(station) for station in obj['Stations']['Station']]

    def get_stations(self):
        """
        Fetch the list of stations
        """
        url = 'http://webservices.ns.nl/ns-api-stations-v2'
        raw_stations = self._request('GET', url)
        return self.parse_stations(raw_stations)
|
aquatix/ns-api
|
ns_api.py
|
list_same
|
python
|
def list_same(list_a, list_b):
result = []
for item in list_b:
if item in list_a:
result.append(item)
return result
|
Return the items from list_b that are also on list_a
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L137-L145
| null |
"""
Library to query the official Dutch railways API
"""
from __future__ import print_function
import collections
import json
import time
from datetime import datetime, timedelta
import pytz
import requests
import xmltodict
from future.utils import python_2_unicode_compatible
from pytz.tzinfo import StaticTzInfo
from requests.auth import HTTPBasicAuth
## ns-api library version
__version__ = '2.7.5'
## Date/time helpers
NS_DATETIME = "%Y-%m-%dT%H:%M:%S%z"
def datetime_to_string(timestamp, dt_format='%Y-%m-%d %H:%M:%S'):
    """
    Format datetime object to string
    """
    return format(timestamp, dt_format)
def simple_time(value):
    """
    Format a datetime or timedelta object to a string of format HH:MM
    """
    if isinstance(value, timedelta):
        # str(timedelta) ends in ':SS[.ffffff]'; cut at the last colon
        text = str(value)
        return text[:text.rindex(':')]
    return datetime_to_string(value, '%H:%M')
## Timezone helpers
def is_dst(zonename):
    """
    Find out whether it's Daylight Saving Time in this timezone
    """
    zone = pytz.timezone(zonename)
    now_utc = pytz.utc.localize(datetime.utcnow())
    # dst() is timedelta(0) outside DST, which is falsy
    return bool(now_utc.astimezone(zone).dst())
class OffsetTime(StaticTzInfo):
    """
    A dumb timezone based on offset such as +0530, -0600, etc.
    """
    def __init__(self, offset):
        # offset looks like '+0530': sign, two hour digits, two minute digits
        sign, hh, mm = offset[0], offset[1:3], offset[3:]
        self._utcoffset = timedelta(hours=int(sign + hh), minutes=int(sign + mm))
def load_datetime(value, dt_format):
    """
    Create timezone-aware datetime object
    """
    if not dt_format.endswith('%z'):
        return datetime.strptime(value, dt_format)
    # Parse the trailing UTC offset ourselves (for interpreters without
    # reliable %z support) and attach it via OffsetTime.
    naive_format = dt_format[:-2]
    offset = value[-5:]
    value = value[:-5]
    if ':' in offset:
        # strip : from HHMM if needed (isoformat() adds it between HH and MM)
        offset = '+' + offset.replace(':', '')
        value = value[:-1]
    naive = datetime.strptime(value, naive_format)
    return OffsetTime(offset).localize(naive)
## List helpers
def list_to_json(source_list):
    """Serialise every item in source_list to its JSON representation."""
    return [item.to_json() for item in source_list]
def list_from_json(source_list_json):
    """
    Deserialise all the items in source_list from json
    """
    result = []
    if source_list_json == [] or source_list_json == None:
        return result
    # Map the serialised class_name back to the model class to instantiate
    model_classes = {
        'Departure': Departure,
        'Disruption': Disruption,
        'Station': Station,
        'Trip': Trip,
        'TripRemark': TripRemark,
        'TripStop': TripStop,
        'TripSubpart': TripSubpart,
    }
    for list_item in source_list_json:
        item = json.loads(list_item)
        try:
            class_name = item['class_name']
        except KeyError:
            print('Unrecognised item with no class_name, skipping')
            continue
        model_class = model_classes.get(class_name)
        if model_class is None:
            print('Unrecognised Class ' + class_name + ', skipping')
            continue
        temp = model_class()
        temp.from_json(list_item)
        result.append(temp)
    return result
def list_diff(list_a, list_b):
    """
    Return the items from list_b that are not in list_a
    """
    return [item for item in list_b if item not in list_a]
def list_merge(list_a, list_b):
    """
    Merge two lists without duplicating items

    Args:
        list_a: list
        list_b: list
    Returns:
        New list with deduplicated items from list_a and list_b,
        keeping first-occurrence order.
    """
    merged = []
    for item in list_a + list_b:
        if item not in merged:
            merged.append(item)
    return merged
## NS API objects
@python_2_unicode_compatible
class BaseObject(object):
    """
    Common (de)serialisation plumbing shared by all NS API model classes.
    """

    def __getstate__(self):
        # Include the concrete class name so list_from_json can rebuild
        # the right model type later.
        state = dict(self.__dict__)
        state['class_name'] = self.__class__.__name__
        return state

    def to_json(self):
        """
        Create a JSON representation of this model
        """
        return json.dumps(self.__getstate__(), ensure_ascii=False)

    def __setstate__(self, source_dict):
        if not source_dict:
            # Somehow the source is None
            return
        del source_dict['class_name']
        self.__dict__ = source_dict

    def from_json(self, source_json):
        """
        Parse a JSON representation of this model back to, well, the model
        """
        self.__setstate__(json.JSONDecoder().decode(source_json))

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        raise NotImplementedError('subclasses must override __str__()')
class Station(BaseObject):
    """
    Information on a railway station
    """
    def __init__(self, stat_dict=None):
        # With no argument, build an empty instance for from_json() to fill
        if stat_dict is None:
            return
        self.key = stat_dict['Code']
        self.code = stat_dict['Code']
        self.uic_code = stat_dict['UICCode']
        self.stationtype = stat_dict['Type']
        names = stat_dict['Namen']
        self.names = {
            'short': names['Kort'],
            'middle': names['Middel'],
            'long': names['Lang'],
        }
        self.country = stat_dict['Land']
        self.lat = stat_dict['Lat']
        self.lon = stat_dict['Lon']
        self.synonyms = []
        try:
            raw_synonyms = stat_dict['Synoniemen']['Synoniem']
            if isinstance(raw_synonyms, str):
                raw_synonyms = [raw_synonyms]
            self.synonyms = [synonym for synonym in raw_synonyms]
        except TypeError:
            # 'Synoniemen' is None (or not subscriptable) when absent
            self.synonyms = []

    def __str__(self):
        return u'<Station> {0} {1}'.format(self.code, self.names['long'])
class Disruption(BaseObject):
    """
    Planned and unplanned disruptions of the railroad traffic
    """

    def __init__(self, part_dict=None):
        """Build a Disruption from one 'Storing' dict parsed from NS XML.

        With no argument, creates an empty instance for from_json() to fill.
        """
        if part_dict is None:
            return
        self.key = part_dict['id']
        self.line = part_dict['Traject']
        self.message = part_dict['Bericht']
        # Optional fields; absent keys become None (same as the previous
        # try/except KeyError chains).
        self.reason = part_dict.get('Reden')
        self.cause = part_dict.get('Oorzaak')
        self.delay_text = part_dict.get('Vertraging')
        try:
            self.timestamp = load_datetime(part_dict['Datum'], NS_DATETIME)
        except (KeyError, TypeError, ValueError):
            # BUG FIX: was a bare `except:`; narrowed to the errors a
            # missing or malformed 'Datum' value can actually raise.
            self.timestamp = None

    def __getstate__(self):
        # Serialise the timestamp to an isoformat string for to_json()
        result = super(Disruption, self).__getstate__()
        result['timestamp'] = result['timestamp'].isoformat()
        return result

    def __setstate__(self, source_dict):
        # Mirror of __getstate__: re-parse the isoformat timestamp
        super(Disruption, self).__setstate__(source_dict)
        self.timestamp = load_datetime(self.timestamp, NS_DATETIME)

    def __str__(self):
        return u'<Disruption> {0}'.format(self.line)
class Departure(BaseObject):
    """
    Information on a departing train on a certain station
    """

    def __init__(self, departure_dict=None):
        """Build a Departure from one 'VertrekkendeTrein' dict (xmltodict).

        With no argument, creates an empty instance for from_json() to fill.
        """
        if departure_dict is None:
            return
        self.key = departure_dict['RitNummer'] + '_' + departure_dict['VertrekTijd']
        self.trip_number = departure_dict['RitNummer']
        self.departure_time = load_datetime(departure_dict['VertrekTijd'], NS_DATETIME)
        try:
            self.has_delay = True
            self.departure_delay = departure_dict['VertrekVertraging']
            self.departure_delay_text = departure_dict['VertrekVertragingTekst']
        except KeyError:
            # Delay keys absent: the train is on time
            self.has_delay = False
        self.departure_platform = departure_dict['VertrekSpoor']
        self.departure_platform_changed = departure_dict['VertrekSpoor']['@wijziging']
        self.destination = departure_dict['EindBestemming']
        try:
            self.route_text = departure_dict['RouteTekst']
        except KeyError:
            self.route_text = None
        # BUG FIX: these four fields used to read
        # `self.x = departure_dict = ['Key']`, which rebound departure_dict
        # to a literal one-element list instead of looking the key up.
        self.train_type = departure_dict['TreinSoort']
        self.carrier = departure_dict['Vervoerder']
        try:
            self.journey_tip = departure_dict['ReisTip']
        except KeyError:
            self.journey_tip = None
        try:
            self.remarks = departure_dict['Opmerkingen']
        except KeyError:
            self.remarks = []

    def __getstate__(self):
        # Serialise the timestamp to an isoformat string for to_json()
        result = super(Departure, self).__getstate__()
        result['departure_time'] = result['departure_time'].isoformat()
        return result

    def __setstate__(self, source_dict):
        super(Departure, self).__setstate__(source_dict)
        # BUG FIX: after __setstate__ the state carries the serialised
        # attribute names, not the raw NS keys, so re-parse the isoformat
        # timestamp from the restored attribute
        # (was: source_dict['VertrekTijd'], which raises KeyError).
        self.departure_time = load_datetime(self.departure_time, NS_DATETIME)

    @property
    def delay(self):
        """
        Return the delay of the train for this instance
        """
        if self.has_delay:
            return self.departure_delay
        return None

    def __str__(self):
        return u'<Departure> trip_number: {0} {1} {2}'.format(self.trip_number, self.destination, self.departure_time)
class TripRemark(BaseObject):
    """
    Notes on this route, generally about disruptions
    """
    def __init__(self, part_dict=None):
        if part_dict is None:
            return
        self.key = part_dict['Id']
        # 'Ernstig' (serious) is a textual boolean in the XML; anything
        # other than the literal string 'false' counts as grave
        self.is_grave = part_dict['Ernstig'] != 'false'
        self.message = part_dict['Text']

    def __str__(self):
        return u'<TripRemark> {0} {1}'.format(self.is_grave, self.message)
class TripStop(BaseObject):
    """
    Information on a stop on a route (station, time, platform)
    """
    def __init__(self, part_dict=None):
        if part_dict is None:
            return
        self.name = part_dict['Naam']
        try:
            self.time = load_datetime(part_dict['Tijd'], NS_DATETIME)
            self.key = simple_time(self.time) + '_' + self.name
        except TypeError:
            # In some rare cases part_dict['Tijd'] can be None
            self.time = None
            self.key = None
        self.platform_changed = False
        try:
            spoor = part_dict['Spoor']
            self.platform = spoor['#text']
            self.platform_changed = spoor['@wijziging'] == 'true'
        except KeyError:
            # No (complete) platform information available
            self.platform = None
        self.delay = part_dict.get('VertrekVertraging', None)

    def __getstate__(self):
        # Serialise the timestamp to an isoformat string for to_json()
        state = super(TripStop, self).__getstate__()
        state['time'] = state['time'].isoformat()
        return state

    def __setstate__(self, source_dict):
        super(TripStop, self).__setstate__(source_dict)
        self.time = load_datetime(self.time, NS_DATETIME)

    def __str__(self):
        return u'<TripStop> {0}'.format(self.name)
class TripSubpart(BaseObject):
    """
    Sub route; each part means a transfer

    Wraps one 'ReisDeel' element of an NS travel advice, as parsed by
    xmltodict.
    """
    def __init__(self, part_dict=None):
        # With no argument, build an empty instance for from_json() to fill
        if part_dict is None:
            return
        self.trip_type = part_dict['@reisSoort']
        self.transporter = part_dict['Vervoerder']
        self.transport_type = part_dict['VervoerType']
        self.journey_id = part_dict['RitNummer']
        # VOLGENS-PLAN, GEANNULEERD (=vervallen trein), GEWIJZIGD (=planaanpassing in de bijsturing op de dag zelf),
        # OVERSTAP-NIET-MOGELIJK, VERTRAAGD, NIEUW (=extra trein)
        self.status = part_dict['Status']
        self.going = True
        self.has_delay = False
        # Cancelled subparts are not going; cancelled/changed/delayed
        # subparts all count as having a delay
        if self.status == 'GEANNULEERD':
            self.going = False
        if self.status == 'GEANNULEERD' or self.status == 'GEWIJZIGD' or self.status == 'VERTRAAGD':
            self.has_delay = True
        try:
            self.disruption_key = part_dict['OngeplandeStoringId']
        except KeyError:
            # No unplanned disruption linked to this subpart
            self.disruption_key = None
        self.stops = []
        raw_stops = part_dict['ReisStop']
        for raw_stop in raw_stops:
            stop = TripStop(raw_stop)
            self.stops.append(stop)

    @property
    def destination(self):
        # Name of the last stop of this subpart
        return self.stops[-1].name

    @property
    def departure_time(self):
        # Time of the first stop of this subpart
        return self.stops[0].time

    def has_departure_delay(self, arrival_check=True):
        # NOTE(review): when arrival_check is False and the subpart has a
        # delay, a value is only returned if some stop hits the elif
        # branch below; otherwise the loop falls through and the method
        # implicitly returns None (falsy). `stop.delay == False` also never
        # matches a delay of None. Confirm this is intended behaviour.
        if arrival_check==False and self.has_delay:
            # Check whether one or more stops have delay, except last one
            delay_found = False
            for stop in self.stops:
                if stop.delay and stop:
                    delay_found = True
                elif stop.delay == False and stop == self.stops[-1]:
                    # Last stop and it doesn't have a delay
                    return delay_found
        else:
            return self.has_delay

    def __getstate__(self):
        # Serialise nested TripStop objects to JSON strings so the whole
        # subpart can be dumped by BaseObject.to_json()
        result = super(TripSubpart, self).__getstate__()
        stops = []
        for stop in self.stops:
            stops.append(stop.to_json())
        result['stops'] = stops
        return result

    def __setstate__(self, source_dict):
        # Mirror of __getstate__: rebuild TripStop objects from their JSON
        super(TripSubpart, self).__setstate__(source_dict)
        trip_stops = []
        for raw_stop in self.stops:
            trip_stop = TripStop()
            trip_stop.from_json(raw_stop)
            trip_stops.append(trip_stop)
        self.stops = trip_stops

    def __str__(self):
        return u'<TripSubpart> [{0}] {1} {2} {3} {4}'.format(self.going, self.journey_id, self.trip_type, self.transport_type, self.status)
class Trip(BaseObject):
    """
    Suggested route for the provided departure/destination combination
    """
    def __init__(self, trip_dict=None, datetime=None):
        # NOTE(review): the `datetime` parameter (the originally requested
        # travel time) shadows the datetime name imported at module level.
        if trip_dict is None:
            return
        # self.key = ??
        try:
            # VOLGENS-PLAN, GEWIJZIGD, VERTRAAGD, NIEUW, NIET-OPTIMAAL, NIET-MOGELIJK, PLAN-GEWIJZIGD
            self.status = trip_dict['Status']
        except KeyError:
            self.status = None
        self.nr_transfers = trip_dict['AantalOverstappen']
        try:
            self.travel_time_planned = trip_dict['GeplandeReisTijd']
            self.going = True
        except KeyError:
            # Train has been cancelled
            self.travel_time_planned = None
            self.going = False
        if self.status == 'NIET-MOGELIJK':
            # Train has been cancelled
            self.going = False
        self.travel_time_actual = trip_dict['ActueleReisTijd']
        self.is_optimal = True if trip_dict['Optimaal'] == 'true' else False
        dt_format = "%Y-%m-%dT%H:%M:%S%z"
        self.requested_time = datetime
        # Timestamps may be absent or unparsable in the payload; fall back to None
        try:
            self.departure_time_planned = load_datetime(trip_dict['GeplandeVertrekTijd'], dt_format)
        except:
            self.departure_time_planned = None
        try:
            self.departure_time_actual = load_datetime(trip_dict['ActueleVertrekTijd'], dt_format)
        except:
            self.departure_time_actual = None
        try:
            self.arrival_time_planned = load_datetime(trip_dict['GeplandeAankomstTijd'], dt_format)
        except:
            self.arrival_time_planned = None
        try:
            self.arrival_time_actual = load_datetime(trip_dict['ActueleAankomstTijd'], dt_format)
        except:
            self.arrival_time_actual = None
        self.trip_parts = []
        raw_parts = trip_dict['ReisDeel']
        # xmltodict yields a single OrderedDict instead of a list when the
        # trip has exactly one part
        if isinstance(trip_dict['ReisDeel'], collections.OrderedDict):
            raw_parts = [trip_dict['ReisDeel']]
        for part in raw_parts:
            trip_part = TripSubpart(part)
            self.trip_parts.append(trip_part)
        try:
            raw_remarks = trip_dict['Melding']
            self.trip_remarks = []
            if isinstance(raw_remarks, collections.OrderedDict):
                raw_remarks = [raw_remarks]
            for remark in raw_remarks:
                trip_remark = TripRemark(remark)
                self.trip_remarks.append(trip_remark)
        except KeyError:
            self.trip_remarks = []

    @property
    def departure(self):
        # Name of the very first stop of the journey
        return self.trip_parts[0].stops[0].name

    @property
    def destination(self):
        # Name of the very last stop of the journey
        return self.trip_parts[-1].stops[-1].name

    @property
    def delay(self):
        """
        Return the delay of the train for this instance
        """
        delay = {'departure_time': None, 'departure_delay': None, 'requested_differs': None,
                 'remarks': self.trip_remarks, 'parts': []}
        if self.departure_time_actual > self.departure_time_planned:
            delay['departure_delay'] = self.departure_time_actual - self.departure_time_planned
            delay['departure_time'] = self.departure_time_actual
        if self.requested_time != self.departure_time_actual:
            delay['requested_differs'] = self.departure_time_actual
        for part in self.trip_parts:
            if part.has_delay:
                delay['parts'].append(part)
        return delay

    def has_delay(self, arrival_check=True):
        # NOTE(review): this is a method while TripSubpart.has_delay is an
        # attribute; __str__ below formats the bound method object itself.
        if self.status != 'VOLGENS-PLAN':
            return True
        for subpart in self.trip_parts:
            if subpart.has_delay:
                if subpart == self.trip_parts[-1]:
                    # Is last part of the trip, check if it is only the arrival
                    return subpart.has_departure_delay(arrival_check)
                return True
        if self.requested_time != self.departure_time_actual:
            return True
        return False

    def has_departure_delay(self, subpartcheck=True):
        """
        Deprecated
        """
        if self.status != 'VOLGENS-PLAN':
            return True
        if subpartcheck and self.trip_parts[0].has_delay:
            return True
        if self.requested_time != self.departure_time_actual:
            return True
        return False

    def __getstate__(self):
        # Serialise datetimes to ISO 8601 text and nested objects to JSON strings
        result = super(Trip, self).__getstate__()
        result['requested_time'] = result['requested_time'].isoformat()
        result['departure_time_actual'] = result['departure_time_actual'].isoformat()
        result['arrival_time_actual'] = result['arrival_time_actual'].isoformat()
        result['departure_time_planned'] = result['departure_time_planned'].isoformat()
        result['arrival_time_planned'] = result['arrival_time_planned'].isoformat()
        trip_parts = []
        for trip_part in result['trip_parts']:
            trip_parts.append(trip_part.to_json())
        result['trip_parts'] = trip_parts
        trip_remarks = []
        for trip_remark in result['trip_remarks']:
            trip_remarks.append(trip_remark.to_json())
        result['trip_remarks'] = trip_remarks
        return result

    def __setstate__(self, source_dict):
        super(Trip, self).__setstate__(source_dict)
        # TripSubpart deserialisation
        trip_parts = []
        subparts = self.trip_parts
        for part in subparts:
            subpart = TripSubpart()
            subpart.from_json(part)
            trip_parts.append(subpart)
        self.trip_parts = trip_parts
        # TripRemark deserialisation
        trip_remarks = []
        remarks = self.trip_remarks
        for raw_remark in remarks:
            remark = TripRemark()
            remark.from_json(raw_remark)
            trip_remarks.append(remark)
        self.trip_remarks = trip_remarks
        # Datetime stamps
        self.departure_time_planned = load_datetime(self.departure_time_planned, NS_DATETIME)
        self.departure_time_actual = load_datetime(self.departure_time_actual, NS_DATETIME)
        self.arrival_time_planned = load_datetime(self.arrival_time_planned, NS_DATETIME)
        self.arrival_time_actual = load_datetime(self.arrival_time_actual, NS_DATETIME)
        self.requested_time = load_datetime(self.requested_time, NS_DATETIME)

    def delay_text(self):
        """
        If trip has delays, format a natural language summary
        """
        # TODO implement
        pass

    @classmethod
    def get_actual(cls, trip_list, time):
        """
        Look for the train actually leaving at time
        """
        for trip in trip_list:
            if simple_time(trip.departure_time_planned) == time:
                return trip
        return None

    @classmethod
    def get_optimal(cls, trip_list):
        """
        Look for the optimal trip in the list
        """
        for trip in trip_list:
            if trip.is_optimal:
                return trip
        return None

    def __str__(self):
        return u'<Trip> {0} plan: {1} actual: {2} transfers: {3}'.format(self.has_delay, self.departure_time_planned, self.departure_time_actual, self.nr_transfers)
class NSAPI(object):
    """
    NS API object

    Library to query the official Dutch railways API
    """
    def __init__(self, username, apikey):
        # Credentials for HTTP basic auth against webservices.ns.nl
        self.username = username
        self.apikey = apikey

    def _request(self, method, url, postdata=None, params=None):
        """Perform an authenticated HTTP request and return the response body as text."""
        headers = {"Accept": "application/xml",
                   "Content-Type": "application/xml",
                   "User-Agent": "ns_api"}
        if postdata:
            postdata = json.dumps(postdata)
        r = requests.request(method,
                             url,
                             data=postdata,
                             params=params,
                             headers=headers,
                             files=None,
                             auth=HTTPBasicAuth(self.username, self.apikey))
        r.encoding = 'utf-8'
        r.raise_for_status()
        return r.text

    def parse_disruptions(self, xml):
        """
        Parse the NS API xml result into Disruption objects

        @param xml: raw XML result from the NS API
        @return: dict with 'planned' and 'unplanned' lists of Disruption
        """
        obj = xmltodict.parse(xml)
        disruptions = {}
        disruptions['unplanned'] = []
        disruptions['planned'] = []
        if obj['Storingen']['Ongepland']:
            raw_disruptions = obj['Storingen']['Ongepland']['Storing']
            # xmltodict yields a single OrderedDict instead of a list when
            # there is exactly one disruption
            if isinstance(raw_disruptions, collections.OrderedDict):
                raw_disruptions = [raw_disruptions]
            for disruption in raw_disruptions:
                newdis = Disruption(disruption)
                #print(newdis.__dict__)
                disruptions['unplanned'].append(newdis)
        if obj['Storingen']['Gepland']:
            raw_disruptions = obj['Storingen']['Gepland']['Storing']
            if isinstance(raw_disruptions, collections.OrderedDict):
                raw_disruptions = [raw_disruptions]
            for disruption in raw_disruptions:
                newdis = Disruption(disruption)
                #print(newdis.__dict__)
                disruptions['planned'].append(newdis)
        return disruptions

    def get_disruptions(self, station=None, actual=True, unplanned=True):
        """
        Fetch the current disruptions, or even the planned ones
        @param station: station to lookup
        @param actual: only actual disruptions, or a

        actuele storingen (=ongeplande storingen + actuele werkzaamheden)
        geplande werkzaamheden (=geplande werkzaamheden)
        actuele storingen voor een gespecificeerd station (=ongeplande storingen + actuele werkzaamheden)

        NOTE(review): the station/actual/unplanned parameters are currently
        ignored — the templated url below is immediately overwritten by a
        hard-coded one.
        """
        url = "http://webservices.ns.nl/ns-api-storingen?station=${Stationsnaam}&actual=${true or false}&unplanned=${true or false}"
        url = "http://webservices.ns.nl/ns-api-storingen?actual=true&unplanned=true"
        raw_disruptions = self._request('GET', url)
        return self.parse_disruptions(raw_disruptions)

    def parse_departures(self, xml):
        """
        Parse the NS API xml result into Departure objects

        @param xml: raw XML result from the NS API
        """
        obj = xmltodict.parse(xml)
        departures = []
        for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']:
            newdep = Departure(departure)
            departures.append(newdep)
            #print('-- dep --')
            #print(newdep.__dict__)
            #print(newdep.to_json())
            # NOTE(review): debug leftover — prints every delay to stdout
            print(newdep.delay)
        return departures

    def get_departures(self, station):
        """
        Fetch the current departure times from this station

        http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}

        @param station: station to lookup
        """
        url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
        raw_departures = self._request('GET', url)
        return self.parse_departures(raw_departures)

    def parse_trips(self, xml, requested_time):
        """
        Parse the NS API xml result into Trip objects
        """
        obj = xmltodict.parse(xml)
        trips = []
        if 'error' in obj:
            print('Error in trips: ' + obj['error']['message'])
            return None
        try:
            for trip in obj['ReisMogelijkheden']['ReisMogelijkheid']:
                newtrip = Trip(trip, requested_time)
                trips.append(newtrip)
        except TypeError:
            # If no options are found, obj['ReisMogelijkheden'] is None
            return None
        return trips

    def get_trips(self, timestamp, start, via, destination, departure=True, prev_advices=1, next_advices=1):
        """
        Fetch trip possibilities for these parameters

        http://webservices.ns.nl/ns-api-treinplanner?<parameters>
        fromStation
        toStation
        dateTime: 2012-02-21T15:50
        departure: true for starting at timestamp, false for arriving at timestamp
        previousAdvices
        nextAdvices

        NOTE(review): the `departure` parameter is accepted but never added to
        the query string, so results are always "depart at timestamp".
        """
        timezonestring = '+0100'
        if is_dst('Europe/Amsterdam'):
            timezonestring = '+0200'
        url = 'http://webservices.ns.nl/ns-api-treinplanner?'
        url = url + 'fromStation=' + start
        url = url + '&toStation=' + destination
        if via:
            url = url + '&via=' + via
        if len(timestamp) == 5:
            # Format of HH:MM - api needs yyyy-mm-ddThh:mm
            timestamp = time.strftime("%Y-%m-%d") + 'T' + timestamp
            #requested_time = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M")
            # TODO: DST/normal time
            requested_time = load_datetime(timestamp + timezonestring, "%Y-%m-%dT%H:%M%z")
        else:
            #requested_time = datetime.strptime(timestamp, "%d-%m-%Y %H:%M")
            requested_time = load_datetime(timestamp + timezonestring, "%d-%m-%Y %H:%M%z")
            timestamp = datetime.strptime(timestamp, "%d-%m-%Y %H:%M").strftime("%Y-%m-%dT%H:%M")
        url = url + '&previousAdvices=' + str(prev_advices)
        url = url + '&nextAdvices=' + str(next_advices)
        url = url + '&dateTime=' + timestamp
        raw_trips = self._request('GET', url)
        return self.parse_trips(raw_trips, requested_time)

    def parse_stations(self, xml):
        """Parse the NS API xml station list into Station objects."""
        obj = xmltodict.parse(xml)
        stations = []
        for station in obj['Stations']['Station']:
            newstat = Station(station)
            stations.append(newstat)
        # NOTE(review): debug leftover — prints the station count to stdout
        print(len(stations))
        return stations

    def get_stations(self):
        """
        Fetch the list of stations
        """
        url = 'http://webservices.ns.nl/ns-api-stations-v2'
        raw_stations = self._request('GET', url)
        return self.parse_stations(raw_stations)
|
aquatix/ns-api
|
ns_api.py
|
list_merge
|
python
|
def list_merge(list_a, list_b):
#return list(collections.OrderedDict.fromkeys(list_a + list_b))
#result = list(list_b)
result = []
for item in list_a:
if not item in result:
result.append(item)
for item in list_b:
if not item in result:
result.append(item)
return result
|
Merge two lists without duplicating items
Args:
list_a: list
list_b: list
Returns:
New list with deduplicated items from list_a and list_b
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L148-L167
| null |
"""
Library to query the official Dutch railways API
"""
from __future__ import print_function
import collections
import json
import time
from datetime import datetime, timedelta
import pytz
import requests
import xmltodict
from future.utils import python_2_unicode_compatible
from pytz.tzinfo import StaticTzInfo
from requests.auth import HTTPBasicAuth
## ns-api library version
__version__ = '2.7.5'
## Date/time helpers
NS_DATETIME = "%Y-%m-%dT%H:%M:%S%z"
def datetime_to_string(timestamp, dt_format='%Y-%m-%d %H:%M:%S'):
    """Render a datetime-like object as text using *dt_format*."""
    formatted = timestamp.strftime(dt_format)
    return formatted
def simple_time(value):
    """
    Format a datetime or timedelta object to a string of format HH:MM
    """
    if not isinstance(value, timedelta):
        return value.strftime('%H:%M')
    # str(timedelta) gives 'H:MM:SS'; keep only the hours and minutes parts
    hours_and_minutes = str(value).split(':')[:2]
    return ':'.join(hours_and_minutes)
## Timezone helpers
def is_dst(zonename):
    """Return True when daylight saving time is currently active in *zonename*."""
    zone = pytz.timezone(zonename)
    utc_now = pytz.utc.localize(datetime.utcnow())
    return utc_now.astimezone(zone).dst() != timedelta(0)
class OffsetTime(StaticTzInfo):
    """
    A dumb timezone based on offset such as +0530, -0600, etc.

    Subclasses pytz's StaticTzInfo, which derives the fixed offset from the
    ``_utcoffset`` attribute assigned here.
    """
    def __init__(self, offset):
        # offset is a 5-character string: sign, HH, MM (e.g. '+0530')
        hours = int(offset[:3])
        # Re-apply the sign to the minutes part so '-0630' becomes -6h -30m
        minutes = int(offset[0] + offset[3:])
        self._utcoffset = timedelta(hours=hours, minutes=minutes)
def load_datetime(value, dt_format):
    """
    Create a (timezone-aware, when *dt_format* ends with '%z') datetime
    object from *value*; the UTC offset is parsed manually.
    """
    if not dt_format.endswith('%z'):
        return datetime.strptime(value, dt_format)
    dt_format = dt_format[:-2]
    offset = value[-5:]
    value = value[:-5]
    if offset != offset.replace(':', ''):
        # strip : from HHMM if needed (isoformat() adds it between HH and MM)
        offset = '+' + offset.replace(':', '')
        value = value[:-1]
    naive = datetime.strptime(value, dt_format)
    return OffsetTime(offset).localize(naive)
## List helpers
def list_to_json(source_list):
    """Serialise every item in *source_list* via its ``to_json`` method."""
    return [item.to_json() for item in source_list]
def list_from_json(source_list_json):
    """
    Deserialise all the items in source_list_json from json.

    Items with an unknown or missing class_name are skipped with a message
    on stdout.
    """
    result = []
    if source_list_json == [] or source_list_json is None:
        return result
    for raw_item in source_list_json:
        item = json.loads(raw_item)
        try:
            name = item['class_name']
        except KeyError:
            print('Unrecognised item with no class_name, skipping')
            continue
        if name == 'Departure':
            obj = Departure()
        elif name == 'Disruption':
            obj = Disruption()
        elif name == 'Station':
            obj = Station()
        elif name == 'Trip':
            obj = Trip()
        elif name == 'TripRemark':
            obj = TripRemark()
        elif name == 'TripStop':
            obj = TripStop()
        elif name == 'TripSubpart':
            obj = TripSubpart()
        else:
            print('Unrecognised Class ' + name + ', skipping')
            continue
        obj.from_json(raw_item)
        result.append(obj)
    return result
def list_diff(list_a, list_b):
    """Return the items from list_b that do not occur in list_a."""
    return [item for item in list_b if item not in list_a]
def list_same(list_a, list_b):
    """Return the items from list_b that also occur in list_a."""
    return [item for item in list_b if item in list_a]
## NS API objects
@python_2_unicode_compatible
class BaseObject(object):
    """
    Base object with useful functions

    Provides the JSON (de)serialisation machinery shared by all models.
    """
    def __getstate__(self):
        # Copy so callers mutating the result do not touch the live instance
        result = self.__dict__.copy()
        # Tag with the concrete class so list_from_json can rebuild the object
        result['class_name'] = self.__class__.__name__
        return result

    def to_json(self):
        """
        Create a JSON representation of this model
        """
        #return json.dumps(self.__getstate__())
        return json.dumps(self.__getstate__(), ensure_ascii=False)

    def __setstate__(self, source_dict):
        if not source_dict:
            # Somehow the source is None
            return
        del source_dict['class_name']
        # Wholesale replacement of the instance namespace
        self.__dict__ = source_dict

    def from_json(self, source_json):
        """
        Parse a JSON representation of this model back to, well, the model
        """
        #source_dict = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(source_json)
        source_dict = json.JSONDecoder().decode(source_json)
        self.__setstate__(source_dict)

    def __eq__(self, other):
        # NOTE(review): equality compares attribute dicts only; no matching
        # __hash__ is defined alongside __eq__.
        return self.__dict__ == other.__dict__

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        raise NotImplementedError('subclasses must override __str__()')
class Station(BaseObject):
    """
    Information on a railway station
    """
    def __init__(self, stat_dict=None):
        # Empty instance (for JSON deserialisation) when no dict is given
        if stat_dict is None:
            return
        code = stat_dict['Code']
        self.key = code
        self.code = code
        self.uic_code = stat_dict['UICCode']
        self.stationtype = stat_dict['Type']
        raw_names = stat_dict['Namen']
        self.names = {
            'short': raw_names['Kort'],
            'middle': raw_names['Middel'],
            'long': raw_names['Lang']
        }
        self.country = stat_dict['Land']
        self.lat = stat_dict['Lat']
        self.lon = stat_dict['Lon']
        # 'Synoniemen' may be None (subscripting then raises TypeError) and a
        # single synonym arrives as a plain string instead of a list
        self.synonyms = []
        try:
            raw_synonyms = stat_dict['Synoniemen']['Synoniem']
            if isinstance(raw_synonyms, str):
                raw_synonyms = [raw_synonyms]
            for synonym in raw_synonyms:
                self.synonyms.append(synonym)
        except TypeError:
            self.synonyms = []

    def __str__(self):
        return u'<Station> {0} {1}'.format(self.code, self.names['long'])
class Disruption(BaseObject):
    """
    Planned and unplanned disruptions of the railroad traffic
    """
    def __init__(self, part_dict=None):
        # Empty instance (for JSON deserialisation) when no dict is given
        if part_dict is None:
            return
        self.key = part_dict['id']
        self.line = part_dict['Traject']
        self.message = part_dict['Bericht']
        # Optional fields default to None when absent from the payload
        self.reason = part_dict.get('Reden')
        self.cause = part_dict.get('Oorzaak')
        self.delay_text = part_dict.get('Vertraging')
        try:
            self.timestamp = load_datetime(part_dict['Datum'], NS_DATETIME)
        except:
            self.timestamp = None

    def __getstate__(self):
        state = super(Disruption, self).__getstate__()
        # datetimes are not JSON serialisable; store ISO 8601 text instead
        state['timestamp'] = state['timestamp'].isoformat()
        return state

    def __setstate__(self, source_dict):
        super(Disruption, self).__setstate__(source_dict)
        self.timestamp = load_datetime(self.timestamp, NS_DATETIME)

    def __str__(self):
        return u'<Disruption> {0}'.format(self.line)
        #return u'<Disruption> {0}'.format(self.key)
class Departure(BaseObject):
    """
    Information on a departing train on a certain station
    """
    def __init__(self, departure_dict=None):
        """
        Build a Departure from one 'VertrekkendeTrein' dict as produced by
        xmltodict; with no argument an empty instance is created (used during
        JSON deserialisation).
        """
        if departure_dict is None:
            return
        self.key = departure_dict['RitNummer'] + '_' + departure_dict['VertrekTijd']
        self.trip_number = departure_dict['RitNummer']
        self.departure_time = load_datetime(departure_dict['VertrekTijd'], NS_DATETIME)
        try:
            self.has_delay = True
            self.departure_delay = departure_dict['VertrekVertraging']
            self.departure_delay_text = departure_dict['VertrekVertragingTekst']
        except KeyError:
            # No delay keys present means the train is on time
            self.has_delay = False
        self.departure_platform = departure_dict['VertrekSpoor']
        self.departure_platform_changed = departure_dict['VertrekSpoor']['@wijziging']
        self.destination = departure_dict['EindBestemming']
        try:
            self.route_text = departure_dict['RouteTekst']
        except KeyError:
            self.route_text = None
        # BUG FIX: these four fields previously rebound `departure_dict` to a
        # literal list (e.g. `departure_dict = ['TreinSoort']`) instead of
        # indexing the payload dict.
        self.train_type = departure_dict['TreinSoort']
        self.carrier = departure_dict['Vervoerder']
        try:
            self.journey_tip = departure_dict['ReisTip']
        except KeyError:
            self.journey_tip = None
        try:
            self.remarks = departure_dict['Opmerkingen']
        except KeyError:
            self.remarks = []

    def __getstate__(self):
        state = super(Departure, self).__getstate__()
        # datetimes are not JSON serialisable; store ISO 8601 text instead
        state['departure_time'] = state['departure_time'].isoformat()
        return state

    def __setstate__(self, source_dict):
        super(Departure, self).__setstate__(source_dict)
        # BUG FIX: restore from the serialised attribute; the previous code
        # read the raw API key 'VertrekTijd', which __getstate__ never stores
        # (consistent with Disruption/TripStop/Trip deserialisation).
        self.departure_time = load_datetime(self.departure_time, NS_DATETIME)

    @property
    def delay(self):
        """
        Return the delay of the train for this instance, or None when on time
        """
        if self.has_delay:
            return self.departure_delay
        return None

    def __str__(self):
        return u'<Departure> trip_number: {0} {1} {2}'.format(self.trip_number, self.destination, self.departure_time)
class TripRemark(BaseObject):
    """
    Notes on this route, generally about disruptions
    """
    def __init__(self, part_dict=None):
        # Empty instance (for JSON deserialisation) when no dict is given
        if part_dict is None:
            return
        self.key = part_dict['Id']
        # 'Ernstig' (severe) arrives as the strings 'true'/'false'
        self.is_grave = part_dict['Ernstig'] != 'false'
        self.message = part_dict['Text']

    def __str__(self):
        return u'<TripRemark> {0} {1}'.format(self.is_grave, self.message)
class TripStop(BaseObject):
    """
    Information on a stop on a route (station, time, platform)
    """
    def __init__(self, part_dict=None):
        # Empty instance (for JSON deserialisation) when no dict is given
        if part_dict is None:
            return
        self.name = part_dict['Naam']
        try:
            self.time = load_datetime(part_dict['Tijd'], NS_DATETIME)
            self.key = simple_time(self.time) + '_' + self.name
        except TypeError:
            # In some rare cases part_dict['Tijd'] can be None
            self.time = None
            self.key = None
        # Platform info: missing 'Spoor'/'#text'/'@wijziging' keys all leave
        # the platform unset (the except clobbers any partial assignment)
        self.platform_changed = False
        try:
            self.platform = part_dict['Spoor']['#text']
            if part_dict['Spoor']['@wijziging'] == 'true':
                self.platform_changed = True
        except KeyError:
            self.platform = None
        # Optional departure delay text, None when absent
        self.delay = part_dict.get('VertrekVertraging')

    def __getstate__(self):
        state = super(TripStop, self).__getstate__()
        # datetimes are not JSON serialisable; store ISO 8601 text instead
        state['time'] = state['time'].isoformat()
        return state

    def __setstate__(self, source_dict):
        super(TripStop, self).__setstate__(source_dict)
        self.time = load_datetime(self.time, NS_DATETIME)

    def __str__(self):
        return u'<TripStop> {0}'.format(self.name)
class TripSubpart(BaseObject):
    """
    Sub route; each part means a transfer
    """
    def __init__(self, part_dict=None):
        # Empty instance (for JSON deserialisation) when no dict is given
        if part_dict is None:
            return
        self.trip_type = part_dict['@reisSoort']
        self.transporter = part_dict['Vervoerder']
        self.transport_type = part_dict['VervoerType']
        self.journey_id = part_dict['RitNummer']
        # VOLGENS-PLAN, GEANNULEERD (=vervallen trein), GEWIJZIGD (=planaanpassing in de bijsturing op de dag zelf),
        # OVERSTAP-NIET-MOGELIJK, VERTRAAGD, NIEUW (=extra trein)
        self.status = part_dict['Status']
        self.going = True
        self.has_delay = False
        if self.status == 'GEANNULEERD':
            self.going = False
        if self.status == 'GEANNULEERD' or self.status == 'GEWIJZIGD' or self.status == 'VERTRAAGD':
            self.has_delay = True
        try:
            self.disruption_key = part_dict['OngeplandeStoringId']
        except KeyError:
            self.disruption_key = None
        self.stops = []
        raw_stops = part_dict['ReisStop']
        # ROBUSTNESS: like Trip/parse_disruptions, guard against xmltodict
        # returning a single OrderedDict instead of a list of stops
        if isinstance(raw_stops, collections.OrderedDict):
            raw_stops = [raw_stops]
        for raw_stop in raw_stops:
            stop = TripStop(raw_stop)
            self.stops.append(stop)

    @property
    def destination(self):
        """Name of the last stop of this subpart."""
        return self.stops[-1].name

    @property
    def departure_time(self):
        """Departure time of the first stop of this subpart."""
        return self.stops[0].time

    def has_departure_delay(self, arrival_check=True):
        """
        Return whether this subpart is delayed; with arrival_check=False a
        delay affecting only the final (arrival) stop is ignored.
        """
        if arrival_check or not self.has_delay:
            return self.has_delay
        # BUG FIX: the previous loop compared stop.delay (None when absent)
        # with False, so its early-return branch never fired and the method
        # could fall off the end returning None. Report explicitly whether
        # any stop before the arrival carries a delay.
        return any(stop.delay for stop in self.stops[:-1])

    def __getstate__(self):
        result = super(TripSubpart, self).__getstate__()
        # Nested TripStop objects are serialised to JSON strings
        result['stops'] = [stop.to_json() for stop in self.stops]
        return result

    def __setstate__(self, source_dict):
        super(TripSubpart, self).__setstate__(source_dict)
        trip_stops = []
        for raw_stop in self.stops:
            trip_stop = TripStop()
            trip_stop.from_json(raw_stop)
            trip_stops.append(trip_stop)
        self.stops = trip_stops

    def __str__(self):
        return u'<TripSubpart> [{0}] {1} {2} {3} {4}'.format(self.going, self.journey_id, self.trip_type, self.transport_type, self.status)
class Trip(BaseObject):
    """
    Suggested route for the provided departure/destination combination
    """
    def __init__(self, trip_dict=None, datetime=None):
        # NOTE(review): the `datetime` parameter (the originally requested
        # travel time) shadows the datetime name imported at module level.
        if trip_dict is None:
            return
        # self.key = ??
        try:
            # VOLGENS-PLAN, GEWIJZIGD, VERTRAAGD, NIEUW, NIET-OPTIMAAL, NIET-MOGELIJK, PLAN-GEWIJZIGD
            self.status = trip_dict['Status']
        except KeyError:
            self.status = None
        self.nr_transfers = trip_dict['AantalOverstappen']
        try:
            self.travel_time_planned = trip_dict['GeplandeReisTijd']
            self.going = True
        except KeyError:
            # Train has been cancelled
            self.travel_time_planned = None
            self.going = False
        if self.status == 'NIET-MOGELIJK':
            # Train has been cancelled
            self.going = False
        self.travel_time_actual = trip_dict['ActueleReisTijd']
        self.is_optimal = True if trip_dict['Optimaal'] == 'true' else False
        dt_format = "%Y-%m-%dT%H:%M:%S%z"
        self.requested_time = datetime
        # Timestamps may be absent or unparsable in the payload; fall back to None
        try:
            self.departure_time_planned = load_datetime(trip_dict['GeplandeVertrekTijd'], dt_format)
        except:
            self.departure_time_planned = None
        try:
            self.departure_time_actual = load_datetime(trip_dict['ActueleVertrekTijd'], dt_format)
        except:
            self.departure_time_actual = None
        try:
            self.arrival_time_planned = load_datetime(trip_dict['GeplandeAankomstTijd'], dt_format)
        except:
            self.arrival_time_planned = None
        try:
            self.arrival_time_actual = load_datetime(trip_dict['ActueleAankomstTijd'], dt_format)
        except:
            self.arrival_time_actual = None
        self.trip_parts = []
        raw_parts = trip_dict['ReisDeel']
        # xmltodict yields a single OrderedDict instead of a list when the
        # trip has exactly one part
        if isinstance(trip_dict['ReisDeel'], collections.OrderedDict):
            raw_parts = [trip_dict['ReisDeel']]
        for part in raw_parts:
            trip_part = TripSubpart(part)
            self.trip_parts.append(trip_part)
        try:
            raw_remarks = trip_dict['Melding']
            self.trip_remarks = []
            if isinstance(raw_remarks, collections.OrderedDict):
                raw_remarks = [raw_remarks]
            for remark in raw_remarks:
                trip_remark = TripRemark(remark)
                self.trip_remarks.append(trip_remark)
        except KeyError:
            self.trip_remarks = []

    @property
    def departure(self):
        # Name of the very first stop of the journey
        return self.trip_parts[0].stops[0].name

    @property
    def destination(self):
        # Name of the very last stop of the journey
        return self.trip_parts[-1].stops[-1].name

    @property
    def delay(self):
        """
        Return the delay of the train for this instance
        """
        delay = {'departure_time': None, 'departure_delay': None, 'requested_differs': None,
                 'remarks': self.trip_remarks, 'parts': []}
        if self.departure_time_actual > self.departure_time_planned:
            delay['departure_delay'] = self.departure_time_actual - self.departure_time_planned
            delay['departure_time'] = self.departure_time_actual
        if self.requested_time != self.departure_time_actual:
            delay['requested_differs'] = self.departure_time_actual
        for part in self.trip_parts:
            if part.has_delay:
                delay['parts'].append(part)
        return delay

    def has_delay(self, arrival_check=True):
        # NOTE(review): this is a method while TripSubpart.has_delay is an
        # attribute; __str__ below formats the bound method object itself.
        if self.status != 'VOLGENS-PLAN':
            return True
        for subpart in self.trip_parts:
            if subpart.has_delay:
                if subpart == self.trip_parts[-1]:
                    # Is last part of the trip, check if it is only the arrival
                    return subpart.has_departure_delay(arrival_check)
                return True
        if self.requested_time != self.departure_time_actual:
            return True
        return False

    def has_departure_delay(self, subpartcheck=True):
        """
        Deprecated
        """
        if self.status != 'VOLGENS-PLAN':
            return True
        if subpartcheck and self.trip_parts[0].has_delay:
            return True
        if self.requested_time != self.departure_time_actual:
            return True
        return False

    def __getstate__(self):
        # Serialise datetimes to ISO 8601 text and nested objects to JSON strings
        result = super(Trip, self).__getstate__()
        result['requested_time'] = result['requested_time'].isoformat()
        result['departure_time_actual'] = result['departure_time_actual'].isoformat()
        result['arrival_time_actual'] = result['arrival_time_actual'].isoformat()
        result['departure_time_planned'] = result['departure_time_planned'].isoformat()
        result['arrival_time_planned'] = result['arrival_time_planned'].isoformat()
        trip_parts = []
        for trip_part in result['trip_parts']:
            trip_parts.append(trip_part.to_json())
        result['trip_parts'] = trip_parts
        trip_remarks = []
        for trip_remark in result['trip_remarks']:
            trip_remarks.append(trip_remark.to_json())
        result['trip_remarks'] = trip_remarks
        return result

    def __setstate__(self, source_dict):
        super(Trip, self).__setstate__(source_dict)
        # TripSubpart deserialisation
        trip_parts = []
        subparts = self.trip_parts
        for part in subparts:
            subpart = TripSubpart()
            subpart.from_json(part)
            trip_parts.append(subpart)
        self.trip_parts = trip_parts
        # TripRemark deserialisation
        trip_remarks = []
        remarks = self.trip_remarks
        for raw_remark in remarks:
            remark = TripRemark()
            remark.from_json(raw_remark)
            trip_remarks.append(remark)
        self.trip_remarks = trip_remarks
        # Datetime stamps
        self.departure_time_planned = load_datetime(self.departure_time_planned, NS_DATETIME)
        self.departure_time_actual = load_datetime(self.departure_time_actual, NS_DATETIME)
        self.arrival_time_planned = load_datetime(self.arrival_time_planned, NS_DATETIME)
        self.arrival_time_actual = load_datetime(self.arrival_time_actual, NS_DATETIME)
        self.requested_time = load_datetime(self.requested_time, NS_DATETIME)

    def delay_text(self):
        """
        If trip has delays, format a natural language summary
        """
        # TODO implement
        pass

    @classmethod
    def get_actual(cls, trip_list, time):
        """
        Look for the train actually leaving at time
        """
        for trip in trip_list:
            if simple_time(trip.departure_time_planned) == time:
                return trip
        return None

    @classmethod
    def get_optimal(cls, trip_list):
        """
        Look for the optimal trip in the list
        """
        for trip in trip_list:
            if trip.is_optimal:
                return trip
        return None

    def __str__(self):
        return u'<Trip> {0} plan: {1} actual: {2} transfers: {3}'.format(self.has_delay, self.departure_time_planned, self.departure_time_actual, self.nr_transfers)
class NSAPI(object):
    """
    NS API object

    Library to query the official Dutch railways API
    """
    def __init__(self, username, apikey):
        # Credentials for HTTP basic auth against webservices.ns.nl
        self.username = username
        self.apikey = apikey

    def _request(self, method, url, postdata=None, params=None):
        """Perform an authenticated HTTP request and return the response body as text."""
        headers = {"Accept": "application/xml",
                   "Content-Type": "application/xml",
                   "User-Agent": "ns_api"}
        if postdata:
            postdata = json.dumps(postdata)
        r = requests.request(method,
                             url,
                             data=postdata,
                             params=params,
                             headers=headers,
                             files=None,
                             auth=HTTPBasicAuth(self.username, self.apikey))
        r.encoding = 'utf-8'
        r.raise_for_status()
        return r.text

    def parse_disruptions(self, xml):
        """
        Parse the NS API xml result into Disruption objects

        @param xml: raw XML result from the NS API
        @return: dict with 'planned' and 'unplanned' lists of Disruption
        """
        obj = xmltodict.parse(xml)
        disruptions = {}
        disruptions['unplanned'] = []
        disruptions['planned'] = []
        if obj['Storingen']['Ongepland']:
            raw_disruptions = obj['Storingen']['Ongepland']['Storing']
            # xmltodict yields a single OrderedDict instead of a list when
            # there is exactly one disruption
            if isinstance(raw_disruptions, collections.OrderedDict):
                raw_disruptions = [raw_disruptions]
            for disruption in raw_disruptions:
                newdis = Disruption(disruption)
                #print(newdis.__dict__)
                disruptions['unplanned'].append(newdis)
        if obj['Storingen']['Gepland']:
            raw_disruptions = obj['Storingen']['Gepland']['Storing']
            if isinstance(raw_disruptions, collections.OrderedDict):
                raw_disruptions = [raw_disruptions]
            for disruption in raw_disruptions:
                newdis = Disruption(disruption)
                #print(newdis.__dict__)
                disruptions['planned'].append(newdis)
        return disruptions

    def get_disruptions(self, station=None, actual=True, unplanned=True):
        """
        Fetch the current disruptions, or even the planned ones
        @param station: station to lookup
        @param actual: only actual disruptions, or a

        actuele storingen (=ongeplande storingen + actuele werkzaamheden)
        geplande werkzaamheden (=geplande werkzaamheden)
        actuele storingen voor een gespecificeerd station (=ongeplande storingen + actuele werkzaamheden)

        NOTE(review): the station/actual/unplanned parameters are currently
        ignored — the templated url below is immediately overwritten by a
        hard-coded one.
        """
        url = "http://webservices.ns.nl/ns-api-storingen?station=${Stationsnaam}&actual=${true or false}&unplanned=${true or false}"
        url = "http://webservices.ns.nl/ns-api-storingen?actual=true&unplanned=true"
        raw_disruptions = self._request('GET', url)
        return self.parse_disruptions(raw_disruptions)

    def parse_departures(self, xml):
        """
        Parse the NS API xml result into Departure objects

        @param xml: raw XML result from the NS API
        """
        obj = xmltodict.parse(xml)
        departures = []
        for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']:
            newdep = Departure(departure)
            departures.append(newdep)
            #print('-- dep --')
            #print(newdep.__dict__)
            #print(newdep.to_json())
            # NOTE(review): debug leftover — prints every delay to stdout
            print(newdep.delay)
        return departures

    def get_departures(self, station):
        """
        Fetch the current departure times from this station

        http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}

        @param station: station to lookup
        """
        url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
        raw_departures = self._request('GET', url)
        return self.parse_departures(raw_departures)

    def parse_trips(self, xml, requested_time):
        """
        Parse the NS API xml result into Trip objects
        """
        obj = xmltodict.parse(xml)
        trips = []
        if 'error' in obj:
            print('Error in trips: ' + obj['error']['message'])
            return None
        try:
            for trip in obj['ReisMogelijkheden']['ReisMogelijkheid']:
                newtrip = Trip(trip, requested_time)
                trips.append(newtrip)
        except TypeError:
            # If no options are found, obj['ReisMogelijkheden'] is None
            return None
        return trips

    def get_trips(self, timestamp, start, via, destination, departure=True, prev_advices=1, next_advices=1):
        """
        Fetch trip possibilities for these parameters

        http://webservices.ns.nl/ns-api-treinplanner?<parameters>
        fromStation
        toStation
        dateTime: 2012-02-21T15:50
        departure: true for starting at timestamp, false for arriving at timestamp
        previousAdvices
        nextAdvices

        NOTE(review): the `departure` parameter is accepted but never added to
        the query string, so results are always "depart at timestamp".
        """
        timezonestring = '+0100'
        if is_dst('Europe/Amsterdam'):
            timezonestring = '+0200'
        url = 'http://webservices.ns.nl/ns-api-treinplanner?'
        url = url + 'fromStation=' + start
        url = url + '&toStation=' + destination
        if via:
            url = url + '&via=' + via
        if len(timestamp) == 5:
            # Format of HH:MM - api needs yyyy-mm-ddThh:mm
            timestamp = time.strftime("%Y-%m-%d") + 'T' + timestamp
            #requested_time = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M")
            # TODO: DST/normal time
            requested_time = load_datetime(timestamp + timezonestring, "%Y-%m-%dT%H:%M%z")
        else:
            #requested_time = datetime.strptime(timestamp, "%d-%m-%Y %H:%M")
            requested_time = load_datetime(timestamp + timezonestring, "%d-%m-%Y %H:%M%z")
            timestamp = datetime.strptime(timestamp, "%d-%m-%Y %H:%M").strftime("%Y-%m-%dT%H:%M")
        url = url + '&previousAdvices=' + str(prev_advices)
        url = url + '&nextAdvices=' + str(next_advices)
        url = url + '&dateTime=' + timestamp
        raw_trips = self._request('GET', url)
        return self.parse_trips(raw_trips, requested_time)

    def parse_stations(self, xml):
        """Parse the NS API xml station list into Station objects."""
        obj = xmltodict.parse(xml)
        stations = []
        for station in obj['Stations']['Station']:
            newstat = Station(station)
            stations.append(newstat)
        # NOTE(review): debug leftover — prints the station count to stdout
        print(len(stations))
        return stations

    def get_stations(self):
        """
        Fetch the list of stations
        """
        url = 'http://webservices.ns.nl/ns-api-stations-v2'
        raw_stations = self._request('GET', url)
        return self.parse_stations(raw_stations)
|
aquatix/ns-api
|
ns_api.py
|
Trip.delay
|
python
|
def delay(self):
delay = {'departure_time': None, 'departure_delay': None, 'requested_differs': None,
'remarks': self.trip_remarks, 'parts': []}
if self.departure_time_actual > self.departure_time_planned:
delay['departure_delay'] = self.departure_time_actual - self.departure_time_planned
delay['departure_time'] = self.departure_time_actual
if self.requested_time != self.departure_time_actual:
delay['requested_differs'] = self.departure_time_actual
for part in self.trip_parts:
if part.has_delay:
delay['parts'].append(part)
return delay
|
Return the delay of the train for this instance
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L580-L594
| null |
class Trip(BaseObject):
"""
Suggested route for the provided departure/destination combination
"""
def __init__(self, trip_dict=None, datetime=None):
if trip_dict is None:
return
# self.key = ??
try:
# VOLGENS-PLAN, GEWIJZIGD, VERTRAAGD, NIEUW, NIET-OPTIMAAL, NIET-MOGELIJK, PLAN-GEWIJZIGD
self.status = trip_dict['Status']
except KeyError:
self.status = None
self.nr_transfers = trip_dict['AantalOverstappen']
try:
self.travel_time_planned = trip_dict['GeplandeReisTijd']
self.going = True
except KeyError:
# Train has been cancelled
self.travel_time_planned = None
self.going = False
if self.status == 'NIET-MOGELIJK':
# Train has been cancelled
self.going = False
self.travel_time_actual = trip_dict['ActueleReisTijd']
self.is_optimal = True if trip_dict['Optimaal'] == 'true' else False
dt_format = "%Y-%m-%dT%H:%M:%S%z"
self.requested_time = datetime
try:
self.departure_time_planned = load_datetime(trip_dict['GeplandeVertrekTijd'], dt_format)
except:
self.departure_time_planned = None
try:
self.departure_time_actual = load_datetime(trip_dict['ActueleVertrekTijd'], dt_format)
except:
self.departure_time_actual = None
try:
self.arrival_time_planned = load_datetime(trip_dict['GeplandeAankomstTijd'], dt_format)
except:
self.arrival_time_planned = None
try:
self.arrival_time_actual = load_datetime(trip_dict['ActueleAankomstTijd'], dt_format)
except:
self.arrival_time_actual = None
self.trip_parts = []
raw_parts = trip_dict['ReisDeel']
if isinstance(trip_dict['ReisDeel'], collections.OrderedDict):
raw_parts = [trip_dict['ReisDeel']]
for part in raw_parts:
trip_part = TripSubpart(part)
self.trip_parts.append(trip_part)
try:
raw_remarks = trip_dict['Melding']
self.trip_remarks = []
if isinstance(raw_remarks, collections.OrderedDict):
raw_remarks = [raw_remarks]
for remark in raw_remarks:
trip_remark = TripRemark(remark)
self.trip_remarks.append(trip_remark)
except KeyError:
self.trip_remarks = []
@property
def departure(self):
return self.trip_parts[0].stops[0].name
@property
def destination(self):
return self.trip_parts[-1].stops[-1].name
@property
def has_delay(self, arrival_check=True):
if self.status != 'VOLGENS-PLAN':
return True
for subpart in self.trip_parts:
if subpart.has_delay:
if subpart == self.trip_parts[-1]:
# Is last part of the trip, check if it is only the arrival
return subpart.has_departure_delay(arrival_check)
return True
if self.requested_time != self.departure_time_actual:
return True
return False
def has_departure_delay(self, subpartcheck=True):
"""
Deprecated
"""
if self.status != 'VOLGENS-PLAN':
return True
if subpartcheck and self.trip_parts[0].has_delay:
return True
if self.requested_time != self.departure_time_actual:
return True
return False
def __getstate__(self):
result = super(Trip, self).__getstate__()
result['requested_time'] = result['requested_time'].isoformat()
result['departure_time_actual'] = result['departure_time_actual'].isoformat()
result['arrival_time_actual'] = result['arrival_time_actual'].isoformat()
result['departure_time_planned'] = result['departure_time_planned'].isoformat()
result['arrival_time_planned'] = result['arrival_time_planned'].isoformat()
trip_parts = []
for trip_part in result['trip_parts']:
trip_parts.append(trip_part.to_json())
result['trip_parts'] = trip_parts
trip_remarks = []
for trip_remark in result['trip_remarks']:
trip_remarks.append(trip_remark.to_json())
result['trip_remarks'] = trip_remarks
return result
def __setstate__(self, source_dict):
super(Trip, self).__setstate__(source_dict)
# TripSubpart deserialisation
trip_parts = []
subparts = self.trip_parts
for part in subparts:
subpart = TripSubpart()
subpart.from_json(part)
trip_parts.append(subpart)
self.trip_parts = trip_parts
# TripRemark deserialisation
trip_remarks = []
remarks = self.trip_remarks
for raw_remark in remarks:
remark = TripRemark()
remark.from_json(raw_remark)
trip_remarks.append(remark)
self.trip_remarks = trip_remarks
# Datetime stamps
self.departure_time_planned = load_datetime(self.departure_time_planned, NS_DATETIME)
self.departure_time_actual = load_datetime(self.departure_time_actual, NS_DATETIME)
self.arrival_time_planned = load_datetime(self.arrival_time_planned, NS_DATETIME)
self.arrival_time_actual = load_datetime(self.arrival_time_actual, NS_DATETIME)
self.requested_time = load_datetime(self.requested_time, NS_DATETIME)
def delay_text(self):
"""
If trip has delays, format a natural language summary
"""
# TODO implement
pass
@classmethod
def get_actual(cls, trip_list, time):
"""
Look for the train actually leaving at time
"""
for trip in trip_list:
if simple_time(trip.departure_time_planned) == time:
return trip
return None
@classmethod
def get_optimal(cls, trip_list):
"""
Look for the optimal trip in the list
"""
for trip in trip_list:
if trip.is_optimal:
return trip
return None
def __str__(self):
return u'<Trip> {0} plan: {1} actual: {2} transfers: {3}'.format(self.has_delay, self.departure_time_planned, self.departure_time_actual, self.nr_transfers)
|
aquatix/ns-api
|
ns_api.py
|
Trip.has_departure_delay
|
python
|
def has_departure_delay(self, subpartcheck=True):
if self.status != 'VOLGENS-PLAN':
return True
if subpartcheck and self.trip_parts[0].has_delay:
return True
if self.requested_time != self.departure_time_actual:
return True
return False
|
Deprecated
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L609-L619
| null |
class Trip(BaseObject):
"""
Suggested route for the provided departure/destination combination
"""
def __init__(self, trip_dict=None, datetime=None):
if trip_dict is None:
return
# self.key = ??
try:
# VOLGENS-PLAN, GEWIJZIGD, VERTRAAGD, NIEUW, NIET-OPTIMAAL, NIET-MOGELIJK, PLAN-GEWIJZIGD
self.status = trip_dict['Status']
except KeyError:
self.status = None
self.nr_transfers = trip_dict['AantalOverstappen']
try:
self.travel_time_planned = trip_dict['GeplandeReisTijd']
self.going = True
except KeyError:
# Train has been cancelled
self.travel_time_planned = None
self.going = False
if self.status == 'NIET-MOGELIJK':
# Train has been cancelled
self.going = False
self.travel_time_actual = trip_dict['ActueleReisTijd']
self.is_optimal = True if trip_dict['Optimaal'] == 'true' else False
dt_format = "%Y-%m-%dT%H:%M:%S%z"
self.requested_time = datetime
try:
self.departure_time_planned = load_datetime(trip_dict['GeplandeVertrekTijd'], dt_format)
except:
self.departure_time_planned = None
try:
self.departure_time_actual = load_datetime(trip_dict['ActueleVertrekTijd'], dt_format)
except:
self.departure_time_actual = None
try:
self.arrival_time_planned = load_datetime(trip_dict['GeplandeAankomstTijd'], dt_format)
except:
self.arrival_time_planned = None
try:
self.arrival_time_actual = load_datetime(trip_dict['ActueleAankomstTijd'], dt_format)
except:
self.arrival_time_actual = None
self.trip_parts = []
raw_parts = trip_dict['ReisDeel']
if isinstance(trip_dict['ReisDeel'], collections.OrderedDict):
raw_parts = [trip_dict['ReisDeel']]
for part in raw_parts:
trip_part = TripSubpart(part)
self.trip_parts.append(trip_part)
try:
raw_remarks = trip_dict['Melding']
self.trip_remarks = []
if isinstance(raw_remarks, collections.OrderedDict):
raw_remarks = [raw_remarks]
for remark in raw_remarks:
trip_remark = TripRemark(remark)
self.trip_remarks.append(trip_remark)
except KeyError:
self.trip_remarks = []
@property
def departure(self):
return self.trip_parts[0].stops[0].name
@property
def destination(self):
return self.trip_parts[-1].stops[-1].name
@property
def delay(self):
"""
Return the delay of the train for this instance
"""
delay = {'departure_time': None, 'departure_delay': None, 'requested_differs': None,
'remarks': self.trip_remarks, 'parts': []}
if self.departure_time_actual > self.departure_time_planned:
delay['departure_delay'] = self.departure_time_actual - self.departure_time_planned
delay['departure_time'] = self.departure_time_actual
if self.requested_time != self.departure_time_actual:
delay['requested_differs'] = self.departure_time_actual
for part in self.trip_parts:
if part.has_delay:
delay['parts'].append(part)
return delay
def has_delay(self, arrival_check=True):
if self.status != 'VOLGENS-PLAN':
return True
for subpart in self.trip_parts:
if subpart.has_delay:
if subpart == self.trip_parts[-1]:
# Is last part of the trip, check if it is only the arrival
return subpart.has_departure_delay(arrival_check)
return True
if self.requested_time != self.departure_time_actual:
return True
return False
def __getstate__(self):
result = super(Trip, self).__getstate__()
result['requested_time'] = result['requested_time'].isoformat()
result['departure_time_actual'] = result['departure_time_actual'].isoformat()
result['arrival_time_actual'] = result['arrival_time_actual'].isoformat()
result['departure_time_planned'] = result['departure_time_planned'].isoformat()
result['arrival_time_planned'] = result['arrival_time_planned'].isoformat()
trip_parts = []
for trip_part in result['trip_parts']:
trip_parts.append(trip_part.to_json())
result['trip_parts'] = trip_parts
trip_remarks = []
for trip_remark in result['trip_remarks']:
trip_remarks.append(trip_remark.to_json())
result['trip_remarks'] = trip_remarks
return result
def __setstate__(self, source_dict):
super(Trip, self).__setstate__(source_dict)
# TripSubpart deserialisation
trip_parts = []
subparts = self.trip_parts
for part in subparts:
subpart = TripSubpart()
subpart.from_json(part)
trip_parts.append(subpart)
self.trip_parts = trip_parts
# TripRemark deserialisation
trip_remarks = []
remarks = self.trip_remarks
for raw_remark in remarks:
remark = TripRemark()
remark.from_json(raw_remark)
trip_remarks.append(remark)
self.trip_remarks = trip_remarks
# Datetime stamps
self.departure_time_planned = load_datetime(self.departure_time_planned, NS_DATETIME)
self.departure_time_actual = load_datetime(self.departure_time_actual, NS_DATETIME)
self.arrival_time_planned = load_datetime(self.arrival_time_planned, NS_DATETIME)
self.arrival_time_actual = load_datetime(self.arrival_time_actual, NS_DATETIME)
self.requested_time = load_datetime(self.requested_time, NS_DATETIME)
def delay_text(self):
"""
If trip has delays, format a natural language summary
"""
# TODO implement
pass
@classmethod
def get_actual(cls, trip_list, time):
"""
Look for the train actually leaving at time
"""
for trip in trip_list:
if simple_time(trip.departure_time_planned) == time:
return trip
return None
@classmethod
def get_optimal(cls, trip_list):
"""
Look for the optimal trip in the list
"""
for trip in trip_list:
if trip.is_optimal:
return trip
return None
def __str__(self):
return u'<Trip> {0} plan: {1} actual: {2} transfers: {3}'.format(self.has_delay, self.departure_time_planned, self.departure_time_actual, self.nr_transfers)
|
aquatix/ns-api
|
ns_api.py
|
Trip.get_actual
|
python
|
def get_actual(cls, trip_list, time):
for trip in trip_list:
if simple_time(trip.departure_time_planned) == time:
return trip
return None
|
Look for the train actually leaving at time
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L674-L681
|
[
"def simple_time(value):\n \"\"\"\n Format a datetime or timedelta object to a string of format HH:MM\n \"\"\"\n if isinstance(value, timedelta):\n return ':'.join(str(value).split(':')[:2])\n return datetime_to_string(value, '%H:%M')\n"
] |
class Trip(BaseObject):
"""
Suggested route for the provided departure/destination combination
"""
def __init__(self, trip_dict=None, datetime=None):
if trip_dict is None:
return
# self.key = ??
try:
# VOLGENS-PLAN, GEWIJZIGD, VERTRAAGD, NIEUW, NIET-OPTIMAAL, NIET-MOGELIJK, PLAN-GEWIJZIGD
self.status = trip_dict['Status']
except KeyError:
self.status = None
self.nr_transfers = trip_dict['AantalOverstappen']
try:
self.travel_time_planned = trip_dict['GeplandeReisTijd']
self.going = True
except KeyError:
# Train has been cancelled
self.travel_time_planned = None
self.going = False
if self.status == 'NIET-MOGELIJK':
# Train has been cancelled
self.going = False
self.travel_time_actual = trip_dict['ActueleReisTijd']
self.is_optimal = True if trip_dict['Optimaal'] == 'true' else False
dt_format = "%Y-%m-%dT%H:%M:%S%z"
self.requested_time = datetime
try:
self.departure_time_planned = load_datetime(trip_dict['GeplandeVertrekTijd'], dt_format)
except:
self.departure_time_planned = None
try:
self.departure_time_actual = load_datetime(trip_dict['ActueleVertrekTijd'], dt_format)
except:
self.departure_time_actual = None
try:
self.arrival_time_planned = load_datetime(trip_dict['GeplandeAankomstTijd'], dt_format)
except:
self.arrival_time_planned = None
try:
self.arrival_time_actual = load_datetime(trip_dict['ActueleAankomstTijd'], dt_format)
except:
self.arrival_time_actual = None
self.trip_parts = []
raw_parts = trip_dict['ReisDeel']
if isinstance(trip_dict['ReisDeel'], collections.OrderedDict):
raw_parts = [trip_dict['ReisDeel']]
for part in raw_parts:
trip_part = TripSubpart(part)
self.trip_parts.append(trip_part)
try:
raw_remarks = trip_dict['Melding']
self.trip_remarks = []
if isinstance(raw_remarks, collections.OrderedDict):
raw_remarks = [raw_remarks]
for remark in raw_remarks:
trip_remark = TripRemark(remark)
self.trip_remarks.append(trip_remark)
except KeyError:
self.trip_remarks = []
@property
def departure(self):
return self.trip_parts[0].stops[0].name
@property
def destination(self):
return self.trip_parts[-1].stops[-1].name
@property
def delay(self):
"""
Return the delay of the train for this instance
"""
delay = {'departure_time': None, 'departure_delay': None, 'requested_differs': None,
'remarks': self.trip_remarks, 'parts': []}
if self.departure_time_actual > self.departure_time_planned:
delay['departure_delay'] = self.departure_time_actual - self.departure_time_planned
delay['departure_time'] = self.departure_time_actual
if self.requested_time != self.departure_time_actual:
delay['requested_differs'] = self.departure_time_actual
for part in self.trip_parts:
if part.has_delay:
delay['parts'].append(part)
return delay
def has_delay(self, arrival_check=True):
if self.status != 'VOLGENS-PLAN':
return True
for subpart in self.trip_parts:
if subpart.has_delay:
if subpart == self.trip_parts[-1]:
# Is last part of the trip, check if it is only the arrival
return subpart.has_departure_delay(arrival_check)
return True
if self.requested_time != self.departure_time_actual:
return True
return False
def has_departure_delay(self, subpartcheck=True):
"""
Deprecated
"""
if self.status != 'VOLGENS-PLAN':
return True
if subpartcheck and self.trip_parts[0].has_delay:
return True
if self.requested_time != self.departure_time_actual:
return True
return False
def __getstate__(self):
result = super(Trip, self).__getstate__()
result['requested_time'] = result['requested_time'].isoformat()
result['departure_time_actual'] = result['departure_time_actual'].isoformat()
result['arrival_time_actual'] = result['arrival_time_actual'].isoformat()
result['departure_time_planned'] = result['departure_time_planned'].isoformat()
result['arrival_time_planned'] = result['arrival_time_planned'].isoformat()
trip_parts = []
for trip_part in result['trip_parts']:
trip_parts.append(trip_part.to_json())
result['trip_parts'] = trip_parts
trip_remarks = []
for trip_remark in result['trip_remarks']:
trip_remarks.append(trip_remark.to_json())
result['trip_remarks'] = trip_remarks
return result
def __setstate__(self, source_dict):
super(Trip, self).__setstate__(source_dict)
# TripSubpart deserialisation
trip_parts = []
subparts = self.trip_parts
for part in subparts:
subpart = TripSubpart()
subpart.from_json(part)
trip_parts.append(subpart)
self.trip_parts = trip_parts
# TripRemark deserialisation
trip_remarks = []
remarks = self.trip_remarks
for raw_remark in remarks:
remark = TripRemark()
remark.from_json(raw_remark)
trip_remarks.append(remark)
self.trip_remarks = trip_remarks
# Datetime stamps
self.departure_time_planned = load_datetime(self.departure_time_planned, NS_DATETIME)
self.departure_time_actual = load_datetime(self.departure_time_actual, NS_DATETIME)
self.arrival_time_planned = load_datetime(self.arrival_time_planned, NS_DATETIME)
self.arrival_time_actual = load_datetime(self.arrival_time_actual, NS_DATETIME)
self.requested_time = load_datetime(self.requested_time, NS_DATETIME)
def delay_text(self):
"""
If trip has delays, format a natural language summary
"""
# TODO implement
pass
@classmethod
@classmethod
def get_optimal(cls, trip_list):
"""
Look for the optimal trip in the list
"""
for trip in trip_list:
if trip.is_optimal:
return trip
return None
def __str__(self):
return u'<Trip> {0} plan: {1} actual: {2} transfers: {3}'.format(self.has_delay, self.departure_time_planned, self.departure_time_actual, self.nr_transfers)
|
aquatix/ns-api
|
ns_api.py
|
NSAPI.parse_disruptions
|
python
|
def parse_disruptions(self, xml):
obj = xmltodict.parse(xml)
disruptions = {}
disruptions['unplanned'] = []
disruptions['planned'] = []
if obj['Storingen']['Ongepland']:
raw_disruptions = obj['Storingen']['Ongepland']['Storing']
if isinstance(raw_disruptions, collections.OrderedDict):
raw_disruptions = [raw_disruptions]
for disruption in raw_disruptions:
newdis = Disruption(disruption)
#print(newdis.__dict__)
disruptions['unplanned'].append(newdis)
if obj['Storingen']['Gepland']:
raw_disruptions = obj['Storingen']['Gepland']['Storing']
if isinstance(raw_disruptions, collections.OrderedDict):
raw_disruptions = [raw_disruptions]
for disruption in raw_disruptions:
newdis = Disruption(disruption)
#print(newdis.__dict__)
disruptions['planned'].append(newdis)
return disruptions
|
Parse the NS API xml result into Disruption objects
@param xml: raw XML result from the NS API
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L729-L756
| null |
class NSAPI(object):
"""
NS API object
Library to query the official Dutch railways API
"""
def __init__(self, username, apikey):
self.username = username
self.apikey = apikey
def _request(self, method, url, postdata=None, params=None):
headers = {"Accept": "application/xml",
"Content-Type": "application/xml",
"User-Agent": "ns_api"}
if postdata:
postdata = json.dumps(postdata)
r = requests.request(method,
url,
data=postdata,
params=params,
headers=headers,
files=None,
auth=HTTPBasicAuth(self.username, self.apikey))
r.encoding = 'utf-8'
r.raise_for_status()
return r.text
def get_disruptions(self, station=None, actual=True, unplanned=True):
"""
Fetch the current disruptions, or even the planned ones
@param station: station to lookup
@param actual: only actual disruptions, or a
actuele storingen (=ongeplande storingen + actuele werkzaamheden)
geplande werkzaamheden (=geplande werkzaamheden)
actuele storingen voor een gespecificeerd station (=ongeplande storingen + actuele werkzaamheden)
"""
url = "http://webservices.ns.nl/ns-api-storingen?station=${Stationsnaam}&actual=${true or false}&unplanned=${true or false}"
url = "http://webservices.ns.nl/ns-api-storingen?actual=true&unplanned=true"
raw_disruptions = self._request('GET', url)
return self.parse_disruptions(raw_disruptions)
def parse_departures(self, xml):
"""
Parse the NS API xml result into Departure objects
@param xml: raw XML result from the NS API
"""
obj = xmltodict.parse(xml)
departures = []
for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']:
newdep = Departure(departure)
departures.append(newdep)
#print('-- dep --')
#print(newdep.__dict__)
#print(newdep.to_json())
print(newdep.delay)
return departures
def get_departures(self, station):
"""
Fetch the current departure times from this station
http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
@param station: station to lookup
"""
url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
raw_departures = self._request('GET', url)
return self.parse_departures(raw_departures)
def parse_trips(self, xml, requested_time):
"""
Parse the NS API xml result into Trip objects
"""
obj = xmltodict.parse(xml)
trips = []
if 'error' in obj:
print('Error in trips: ' + obj['error']['message'])
return None
try:
for trip in obj['ReisMogelijkheden']['ReisMogelijkheid']:
newtrip = Trip(trip, requested_time)
trips.append(newtrip)
except TypeError:
# If no options are found, obj['ReisMogelijkheden'] is None
return None
return trips
def get_trips(self, timestamp, start, via, destination, departure=True, prev_advices=1, next_advices=1):
"""
Fetch trip possibilities for these parameters
http://webservices.ns.nl/ns-api-treinplanner?<parameters>
fromStation
toStation
dateTime: 2012-02-21T15:50
departure: true for starting at timestamp, false for arriving at timestamp
previousAdvices
nextAdvices
"""
timezonestring = '+0100'
if is_dst('Europe/Amsterdam'):
timezonestring = '+0200'
url = 'http://webservices.ns.nl/ns-api-treinplanner?'
url = url + 'fromStation=' + start
url = url + '&toStation=' + destination
if via:
url = url + '&via=' + via
if len(timestamp) == 5:
# Format of HH:MM - api needs yyyy-mm-ddThh:mm
timestamp = time.strftime("%Y-%m-%d") + 'T' + timestamp
#requested_time = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M")
# TODO: DST/normal time
requested_time = load_datetime(timestamp + timezonestring, "%Y-%m-%dT%H:%M%z")
else:
#requested_time = datetime.strptime(timestamp, "%d-%m-%Y %H:%M")
requested_time = load_datetime(timestamp + timezonestring, "%d-%m-%Y %H:%M%z")
timestamp = datetime.strptime(timestamp, "%d-%m-%Y %H:%M").strftime("%Y-%m-%dT%H:%M")
url = url + '&previousAdvices=' + str(prev_advices)
url = url + '&nextAdvices=' + str(next_advices)
url = url + '&dateTime=' + timestamp
raw_trips = self._request('GET', url)
return self.parse_trips(raw_trips, requested_time)
def parse_stations(self, xml):
obj = xmltodict.parse(xml)
stations = []
for station in obj['Stations']['Station']:
newstat = Station(station)
stations.append(newstat)
print(len(stations))
return stations
def get_stations(self):
"""
Fetch the list of stations
"""
url = 'http://webservices.ns.nl/ns-api-stations-v2'
raw_stations = self._request('GET', url)
return self.parse_stations(raw_stations)
|
aquatix/ns-api
|
ns_api.py
|
NSAPI.get_disruptions
|
python
|
def get_disruptions(self, station=None, actual=True, unplanned=True):
url = "http://webservices.ns.nl/ns-api-storingen?station=${Stationsnaam}&actual=${true or false}&unplanned=${true or false}"
url = "http://webservices.ns.nl/ns-api-storingen?actual=true&unplanned=true"
raw_disruptions = self._request('GET', url)
return self.parse_disruptions(raw_disruptions)
|
Fetch the current disruptions, or even the planned ones
@param station: station to lookup
@param actual: only actual disruptions, or a
actuele storingen (=ongeplande storingen + actuele werkzaamheden)
geplande werkzaamheden (=geplande werkzaamheden)
actuele storingen voor een gespecificeerd station (=ongeplande storingen + actuele werkzaamheden)
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L759-L773
|
[
"def _request(self, method, url, postdata=None, params=None):\n headers = {\"Accept\": \"application/xml\",\n \"Content-Type\": \"application/xml\",\n \"User-Agent\": \"ns_api\"}\n\n if postdata:\n postdata = json.dumps(postdata)\n\n r = requests.request(method,\n url,\n data=postdata,\n params=params,\n headers=headers,\n files=None,\n auth=HTTPBasicAuth(self.username, self.apikey))\n\n r.encoding = 'utf-8'\n r.raise_for_status()\n return r.text\n",
"def parse_disruptions(self, xml):\n \"\"\"\n Parse the NS API xml result into Disruption objects\n @param xml: raw XML result from the NS API\n \"\"\"\n obj = xmltodict.parse(xml)\n disruptions = {}\n disruptions['unplanned'] = []\n disruptions['planned'] = []\n\n if obj['Storingen']['Ongepland']:\n raw_disruptions = obj['Storingen']['Ongepland']['Storing']\n if isinstance(raw_disruptions, collections.OrderedDict):\n raw_disruptions = [raw_disruptions]\n for disruption in raw_disruptions:\n newdis = Disruption(disruption)\n #print(newdis.__dict__)\n disruptions['unplanned'].append(newdis)\n\n if obj['Storingen']['Gepland']:\n raw_disruptions = obj['Storingen']['Gepland']['Storing']\n if isinstance(raw_disruptions, collections.OrderedDict):\n raw_disruptions = [raw_disruptions]\n for disruption in raw_disruptions:\n newdis = Disruption(disruption)\n #print(newdis.__dict__)\n disruptions['planned'].append(newdis)\n return disruptions\n"
] |
class NSAPI(object):
"""
NS API object
Library to query the official Dutch railways API
"""
def __init__(self, username, apikey):
self.username = username
self.apikey = apikey
def _request(self, method, url, postdata=None, params=None):
headers = {"Accept": "application/xml",
"Content-Type": "application/xml",
"User-Agent": "ns_api"}
if postdata:
postdata = json.dumps(postdata)
r = requests.request(method,
url,
data=postdata,
params=params,
headers=headers,
files=None,
auth=HTTPBasicAuth(self.username, self.apikey))
r.encoding = 'utf-8'
r.raise_for_status()
return r.text
def parse_disruptions(self, xml):
"""
Parse the NS API xml result into Disruption objects
@param xml: raw XML result from the NS API
"""
obj = xmltodict.parse(xml)
disruptions = {}
disruptions['unplanned'] = []
disruptions['planned'] = []
if obj['Storingen']['Ongepland']:
raw_disruptions = obj['Storingen']['Ongepland']['Storing']
if isinstance(raw_disruptions, collections.OrderedDict):
raw_disruptions = [raw_disruptions]
for disruption in raw_disruptions:
newdis = Disruption(disruption)
#print(newdis.__dict__)
disruptions['unplanned'].append(newdis)
if obj['Storingen']['Gepland']:
raw_disruptions = obj['Storingen']['Gepland']['Storing']
if isinstance(raw_disruptions, collections.OrderedDict):
raw_disruptions = [raw_disruptions]
for disruption in raw_disruptions:
newdis = Disruption(disruption)
#print(newdis.__dict__)
disruptions['planned'].append(newdis)
return disruptions
def parse_departures(self, xml):
"""
Parse the NS API xml result into Departure objects
@param xml: raw XML result from the NS API
"""
obj = xmltodict.parse(xml)
departures = []
for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']:
newdep = Departure(departure)
departures.append(newdep)
#print('-- dep --')
#print(newdep.__dict__)
#print(newdep.to_json())
print(newdep.delay)
return departures
def get_departures(self, station):
"""
Fetch the current departure times from this station
http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
@param station: station to lookup
"""
url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
raw_departures = self._request('GET', url)
return self.parse_departures(raw_departures)
def parse_trips(self, xml, requested_time):
"""
Parse the NS API xml result into Trip objects
"""
obj = xmltodict.parse(xml)
trips = []
if 'error' in obj:
print('Error in trips: ' + obj['error']['message'])
return None
try:
for trip in obj['ReisMogelijkheden']['ReisMogelijkheid']:
newtrip = Trip(trip, requested_time)
trips.append(newtrip)
except TypeError:
# If no options are found, obj['ReisMogelijkheden'] is None
return None
return trips
def get_trips(self, timestamp, start, via, destination, departure=True, prev_advices=1, next_advices=1):
"""
Fetch trip possibilities for these parameters
http://webservices.ns.nl/ns-api-treinplanner?<parameters>
fromStation
toStation
dateTime: 2012-02-21T15:50
departure: true for starting at timestamp, false for arriving at timestamp
previousAdvices
nextAdvices
"""
timezonestring = '+0100'
if is_dst('Europe/Amsterdam'):
timezonestring = '+0200'
url = 'http://webservices.ns.nl/ns-api-treinplanner?'
url = url + 'fromStation=' + start
url = url + '&toStation=' + destination
if via:
url = url + '&via=' + via
if len(timestamp) == 5:
# Format of HH:MM - api needs yyyy-mm-ddThh:mm
timestamp = time.strftime("%Y-%m-%d") + 'T' + timestamp
#requested_time = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M")
# TODO: DST/normal time
requested_time = load_datetime(timestamp + timezonestring, "%Y-%m-%dT%H:%M%z")
else:
#requested_time = datetime.strptime(timestamp, "%d-%m-%Y %H:%M")
requested_time = load_datetime(timestamp + timezonestring, "%d-%m-%Y %H:%M%z")
timestamp = datetime.strptime(timestamp, "%d-%m-%Y %H:%M").strftime("%Y-%m-%dT%H:%M")
url = url + '&previousAdvices=' + str(prev_advices)
url = url + '&nextAdvices=' + str(next_advices)
url = url + '&dateTime=' + timestamp
raw_trips = self._request('GET', url)
return self.parse_trips(raw_trips, requested_time)
def parse_stations(self, xml):
obj = xmltodict.parse(xml)
stations = []
for station in obj['Stations']['Station']:
newstat = Station(station)
stations.append(newstat)
print(len(stations))
return stations
def get_stations(self):
"""
Fetch the list of stations
"""
url = 'http://webservices.ns.nl/ns-api-stations-v2'
raw_stations = self._request('GET', url)
return self.parse_stations(raw_stations)
|
aquatix/ns-api
|
ns_api.py
|
NSAPI.parse_departures
|
python
|
def parse_departures(self, xml):
obj = xmltodict.parse(xml)
departures = []
for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']:
newdep = Departure(departure)
departures.append(newdep)
#print('-- dep --')
#print(newdep.__dict__)
#print(newdep.to_json())
print(newdep.delay)
return departures
|
Parse the NS API xml result into Departure objects
@param xml: raw XML result from the NS API
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L776-L792
| null |
class NSAPI(object):
"""
NS API object
Library to query the official Dutch railways API
"""
def __init__(self, username, apikey):
self.username = username
self.apikey = apikey
def _request(self, method, url, postdata=None, params=None):
headers = {"Accept": "application/xml",
"Content-Type": "application/xml",
"User-Agent": "ns_api"}
if postdata:
postdata = json.dumps(postdata)
r = requests.request(method,
url,
data=postdata,
params=params,
headers=headers,
files=None,
auth=HTTPBasicAuth(self.username, self.apikey))
r.encoding = 'utf-8'
r.raise_for_status()
return r.text
def parse_disruptions(self, xml):
"""
Parse the NS API xml result into Disruption objects
@param xml: raw XML result from the NS API
"""
obj = xmltodict.parse(xml)
disruptions = {}
disruptions['unplanned'] = []
disruptions['planned'] = []
if obj['Storingen']['Ongepland']:
raw_disruptions = obj['Storingen']['Ongepland']['Storing']
if isinstance(raw_disruptions, collections.OrderedDict):
raw_disruptions = [raw_disruptions]
for disruption in raw_disruptions:
newdis = Disruption(disruption)
#print(newdis.__dict__)
disruptions['unplanned'].append(newdis)
if obj['Storingen']['Gepland']:
raw_disruptions = obj['Storingen']['Gepland']['Storing']
if isinstance(raw_disruptions, collections.OrderedDict):
raw_disruptions = [raw_disruptions]
for disruption in raw_disruptions:
newdis = Disruption(disruption)
#print(newdis.__dict__)
disruptions['planned'].append(newdis)
return disruptions
def get_disruptions(self, station=None, actual=True, unplanned=True):
"""
Fetch the current disruptions, or even the planned ones
@param station: station to lookup
@param actual: only actual disruptions, or a
actuele storingen (=ongeplande storingen + actuele werkzaamheden)
geplande werkzaamheden (=geplande werkzaamheden)
actuele storingen voor een gespecificeerd station (=ongeplande storingen + actuele werkzaamheden)
"""
url = "http://webservices.ns.nl/ns-api-storingen?station=${Stationsnaam}&actual=${true or false}&unplanned=${true or false}"
url = "http://webservices.ns.nl/ns-api-storingen?actual=true&unplanned=true"
raw_disruptions = self._request('GET', url)
return self.parse_disruptions(raw_disruptions)
def get_departures(self, station):
"""
Fetch the current departure times from this station
http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
@param station: station to lookup
"""
url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
raw_departures = self._request('GET', url)
return self.parse_departures(raw_departures)
def parse_trips(self, xml, requested_time):
"""
Parse the NS API xml result into Trip objects
"""
obj = xmltodict.parse(xml)
trips = []
if 'error' in obj:
print('Error in trips: ' + obj['error']['message'])
return None
try:
for trip in obj['ReisMogelijkheden']['ReisMogelijkheid']:
newtrip = Trip(trip, requested_time)
trips.append(newtrip)
except TypeError:
# If no options are found, obj['ReisMogelijkheden'] is None
return None
return trips
def get_trips(self, timestamp, start, via, destination, departure=True, prev_advices=1, next_advices=1):
"""
Fetch trip possibilities for these parameters
http://webservices.ns.nl/ns-api-treinplanner?<parameters>
fromStation
toStation
dateTime: 2012-02-21T15:50
departure: true for starting at timestamp, false for arriving at timestamp
previousAdvices
nextAdvices
"""
timezonestring = '+0100'
if is_dst('Europe/Amsterdam'):
timezonestring = '+0200'
url = 'http://webservices.ns.nl/ns-api-treinplanner?'
url = url + 'fromStation=' + start
url = url + '&toStation=' + destination
if via:
url = url + '&via=' + via
if len(timestamp) == 5:
# Format of HH:MM - api needs yyyy-mm-ddThh:mm
timestamp = time.strftime("%Y-%m-%d") + 'T' + timestamp
#requested_time = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M")
# TODO: DST/normal time
requested_time = load_datetime(timestamp + timezonestring, "%Y-%m-%dT%H:%M%z")
else:
#requested_time = datetime.strptime(timestamp, "%d-%m-%Y %H:%M")
requested_time = load_datetime(timestamp + timezonestring, "%d-%m-%Y %H:%M%z")
timestamp = datetime.strptime(timestamp, "%d-%m-%Y %H:%M").strftime("%Y-%m-%dT%H:%M")
url = url + '&previousAdvices=' + str(prev_advices)
url = url + '&nextAdvices=' + str(next_advices)
url = url + '&dateTime=' + timestamp
raw_trips = self._request('GET', url)
return self.parse_trips(raw_trips, requested_time)
def parse_stations(self, xml):
obj = xmltodict.parse(xml)
stations = []
for station in obj['Stations']['Station']:
newstat = Station(station)
stations.append(newstat)
print(len(stations))
return stations
def get_stations(self):
"""
Fetch the list of stations
"""
url = 'http://webservices.ns.nl/ns-api-stations-v2'
raw_stations = self._request('GET', url)
return self.parse_stations(raw_stations)
|
aquatix/ns-api
|
ns_api.py
|
NSAPI.get_departures
|
python
|
def get_departures(self, station):
url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
raw_departures = self._request('GET', url)
return self.parse_departures(raw_departures)
|
Fetch the current departure times from this station
http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
@param station: station to lookup
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L795-L804
|
[
"def _request(self, method, url, postdata=None, params=None):\n headers = {\"Accept\": \"application/xml\",\n \"Content-Type\": \"application/xml\",\n \"User-Agent\": \"ns_api\"}\n\n if postdata:\n postdata = json.dumps(postdata)\n\n r = requests.request(method,\n url,\n data=postdata,\n params=params,\n headers=headers,\n files=None,\n auth=HTTPBasicAuth(self.username, self.apikey))\n\n r.encoding = 'utf-8'\n r.raise_for_status()\n return r.text\n",
"def parse_departures(self, xml):\n \"\"\"\n Parse the NS API xml result into Departure objects\n @param xml: raw XML result from the NS API\n \"\"\"\n obj = xmltodict.parse(xml)\n departures = []\n\n for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']:\n newdep = Departure(departure)\n departures.append(newdep)\n #print('-- dep --')\n #print(newdep.__dict__)\n #print(newdep.to_json())\n print(newdep.delay)\n\n return departures\n"
] |
class NSAPI(object):
"""
NS API object
Library to query the official Dutch railways API
"""
def __init__(self, username, apikey):
self.username = username
self.apikey = apikey
def _request(self, method, url, postdata=None, params=None):
headers = {"Accept": "application/xml",
"Content-Type": "application/xml",
"User-Agent": "ns_api"}
if postdata:
postdata = json.dumps(postdata)
r = requests.request(method,
url,
data=postdata,
params=params,
headers=headers,
files=None,
auth=HTTPBasicAuth(self.username, self.apikey))
r.encoding = 'utf-8'
r.raise_for_status()
return r.text
def parse_disruptions(self, xml):
"""
Parse the NS API xml result into Disruption objects
@param xml: raw XML result from the NS API
"""
obj = xmltodict.parse(xml)
disruptions = {}
disruptions['unplanned'] = []
disruptions['planned'] = []
if obj['Storingen']['Ongepland']:
raw_disruptions = obj['Storingen']['Ongepland']['Storing']
if isinstance(raw_disruptions, collections.OrderedDict):
raw_disruptions = [raw_disruptions]
for disruption in raw_disruptions:
newdis = Disruption(disruption)
#print(newdis.__dict__)
disruptions['unplanned'].append(newdis)
if obj['Storingen']['Gepland']:
raw_disruptions = obj['Storingen']['Gepland']['Storing']
if isinstance(raw_disruptions, collections.OrderedDict):
raw_disruptions = [raw_disruptions]
for disruption in raw_disruptions:
newdis = Disruption(disruption)
#print(newdis.__dict__)
disruptions['planned'].append(newdis)
return disruptions
def get_disruptions(self, station=None, actual=True, unplanned=True):
"""
Fetch the current disruptions, or even the planned ones
@param station: station to lookup
@param actual: only actual disruptions, or a
actuele storingen (=ongeplande storingen + actuele werkzaamheden)
geplande werkzaamheden (=geplande werkzaamheden)
actuele storingen voor een gespecificeerd station (=ongeplande storingen + actuele werkzaamheden)
"""
url = "http://webservices.ns.nl/ns-api-storingen?station=${Stationsnaam}&actual=${true or false}&unplanned=${true or false}"
url = "http://webservices.ns.nl/ns-api-storingen?actual=true&unplanned=true"
raw_disruptions = self._request('GET', url)
return self.parse_disruptions(raw_disruptions)
def parse_departures(self, xml):
"""
Parse the NS API xml result into Departure objects
@param xml: raw XML result from the NS API
"""
obj = xmltodict.parse(xml)
departures = []
for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']:
newdep = Departure(departure)
departures.append(newdep)
#print('-- dep --')
#print(newdep.__dict__)
#print(newdep.to_json())
print(newdep.delay)
return departures
def parse_trips(self, xml, requested_time):
"""
Parse the NS API xml result into Trip objects
"""
obj = xmltodict.parse(xml)
trips = []
if 'error' in obj:
print('Error in trips: ' + obj['error']['message'])
return None
try:
for trip in obj['ReisMogelijkheden']['ReisMogelijkheid']:
newtrip = Trip(trip, requested_time)
trips.append(newtrip)
except TypeError:
# If no options are found, obj['ReisMogelijkheden'] is None
return None
return trips
def get_trips(self, timestamp, start, via, destination, departure=True, prev_advices=1, next_advices=1):
"""
Fetch trip possibilities for these parameters
http://webservices.ns.nl/ns-api-treinplanner?<parameters>
fromStation
toStation
dateTime: 2012-02-21T15:50
departure: true for starting at timestamp, false for arriving at timestamp
previousAdvices
nextAdvices
"""
timezonestring = '+0100'
if is_dst('Europe/Amsterdam'):
timezonestring = '+0200'
url = 'http://webservices.ns.nl/ns-api-treinplanner?'
url = url + 'fromStation=' + start
url = url + '&toStation=' + destination
if via:
url = url + '&via=' + via
if len(timestamp) == 5:
# Format of HH:MM - api needs yyyy-mm-ddThh:mm
timestamp = time.strftime("%Y-%m-%d") + 'T' + timestamp
#requested_time = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M")
# TODO: DST/normal time
requested_time = load_datetime(timestamp + timezonestring, "%Y-%m-%dT%H:%M%z")
else:
#requested_time = datetime.strptime(timestamp, "%d-%m-%Y %H:%M")
requested_time = load_datetime(timestamp + timezonestring, "%d-%m-%Y %H:%M%z")
timestamp = datetime.strptime(timestamp, "%d-%m-%Y %H:%M").strftime("%Y-%m-%dT%H:%M")
url = url + '&previousAdvices=' + str(prev_advices)
url = url + '&nextAdvices=' + str(next_advices)
url = url + '&dateTime=' + timestamp
raw_trips = self._request('GET', url)
return self.parse_trips(raw_trips, requested_time)
def parse_stations(self, xml):
obj = xmltodict.parse(xml)
stations = []
for station in obj['Stations']['Station']:
newstat = Station(station)
stations.append(newstat)
print(len(stations))
return stations
def get_stations(self):
"""
Fetch the list of stations
"""
url = 'http://webservices.ns.nl/ns-api-stations-v2'
raw_stations = self._request('GET', url)
return self.parse_stations(raw_stations)
|
aquatix/ns-api
|
ns_api.py
|
NSAPI.parse_trips
|
python
|
def parse_trips(self, xml, requested_time):
obj = xmltodict.parse(xml)
trips = []
if 'error' in obj:
print('Error in trips: ' + obj['error']['message'])
return None
try:
for trip in obj['ReisMogelijkheden']['ReisMogelijkheid']:
newtrip = Trip(trip, requested_time)
trips.append(newtrip)
except TypeError:
# If no options are found, obj['ReisMogelijkheden'] is None
return None
return trips
|
Parse the NS API xml result into Trip objects
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L807-L826
| null |
class NSAPI(object):
"""
NS API object
Library to query the official Dutch railways API
"""
def __init__(self, username, apikey):
self.username = username
self.apikey = apikey
def _request(self, method, url, postdata=None, params=None):
headers = {"Accept": "application/xml",
"Content-Type": "application/xml",
"User-Agent": "ns_api"}
if postdata:
postdata = json.dumps(postdata)
r = requests.request(method,
url,
data=postdata,
params=params,
headers=headers,
files=None,
auth=HTTPBasicAuth(self.username, self.apikey))
r.encoding = 'utf-8'
r.raise_for_status()
return r.text
def parse_disruptions(self, xml):
"""
Parse the NS API xml result into Disruption objects
@param xml: raw XML result from the NS API
"""
obj = xmltodict.parse(xml)
disruptions = {}
disruptions['unplanned'] = []
disruptions['planned'] = []
if obj['Storingen']['Ongepland']:
raw_disruptions = obj['Storingen']['Ongepland']['Storing']
if isinstance(raw_disruptions, collections.OrderedDict):
raw_disruptions = [raw_disruptions]
for disruption in raw_disruptions:
newdis = Disruption(disruption)
#print(newdis.__dict__)
disruptions['unplanned'].append(newdis)
if obj['Storingen']['Gepland']:
raw_disruptions = obj['Storingen']['Gepland']['Storing']
if isinstance(raw_disruptions, collections.OrderedDict):
raw_disruptions = [raw_disruptions]
for disruption in raw_disruptions:
newdis = Disruption(disruption)
#print(newdis.__dict__)
disruptions['planned'].append(newdis)
return disruptions
def get_disruptions(self, station=None, actual=True, unplanned=True):
"""
Fetch the current disruptions, or even the planned ones
@param station: station to lookup
@param actual: only actual disruptions, or a
actuele storingen (=ongeplande storingen + actuele werkzaamheden)
geplande werkzaamheden (=geplande werkzaamheden)
actuele storingen voor een gespecificeerd station (=ongeplande storingen + actuele werkzaamheden)
"""
url = "http://webservices.ns.nl/ns-api-storingen?station=${Stationsnaam}&actual=${true or false}&unplanned=${true or false}"
url = "http://webservices.ns.nl/ns-api-storingen?actual=true&unplanned=true"
raw_disruptions = self._request('GET', url)
return self.parse_disruptions(raw_disruptions)
def parse_departures(self, xml):
"""
Parse the NS API xml result into Departure objects
@param xml: raw XML result from the NS API
"""
obj = xmltodict.parse(xml)
departures = []
for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']:
newdep = Departure(departure)
departures.append(newdep)
#print('-- dep --')
#print(newdep.__dict__)
#print(newdep.to_json())
print(newdep.delay)
return departures
def get_departures(self, station):
"""
Fetch the current departure times from this station
http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
@param station: station to lookup
"""
url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
raw_departures = self._request('GET', url)
return self.parse_departures(raw_departures)
def get_trips(self, timestamp, start, via, destination, departure=True, prev_advices=1, next_advices=1):
"""
Fetch trip possibilities for these parameters
http://webservices.ns.nl/ns-api-treinplanner?<parameters>
fromStation
toStation
dateTime: 2012-02-21T15:50
departure: true for starting at timestamp, false for arriving at timestamp
previousAdvices
nextAdvices
"""
timezonestring = '+0100'
if is_dst('Europe/Amsterdam'):
timezonestring = '+0200'
url = 'http://webservices.ns.nl/ns-api-treinplanner?'
url = url + 'fromStation=' + start
url = url + '&toStation=' + destination
if via:
url = url + '&via=' + via
if len(timestamp) == 5:
# Format of HH:MM - api needs yyyy-mm-ddThh:mm
timestamp = time.strftime("%Y-%m-%d") + 'T' + timestamp
#requested_time = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M")
# TODO: DST/normal time
requested_time = load_datetime(timestamp + timezonestring, "%Y-%m-%dT%H:%M%z")
else:
#requested_time = datetime.strptime(timestamp, "%d-%m-%Y %H:%M")
requested_time = load_datetime(timestamp + timezonestring, "%d-%m-%Y %H:%M%z")
timestamp = datetime.strptime(timestamp, "%d-%m-%Y %H:%M").strftime("%Y-%m-%dT%H:%M")
url = url + '&previousAdvices=' + str(prev_advices)
url = url + '&nextAdvices=' + str(next_advices)
url = url + '&dateTime=' + timestamp
raw_trips = self._request('GET', url)
return self.parse_trips(raw_trips, requested_time)
def parse_stations(self, xml):
obj = xmltodict.parse(xml)
stations = []
for station in obj['Stations']['Station']:
newstat = Station(station)
stations.append(newstat)
print(len(stations))
return stations
def get_stations(self):
"""
Fetch the list of stations
"""
url = 'http://webservices.ns.nl/ns-api-stations-v2'
raw_stations = self._request('GET', url)
return self.parse_stations(raw_stations)
|
aquatix/ns-api
|
ns_api.py
|
NSAPI.get_trips
|
python
|
def get_trips(self, timestamp, start, via, destination, departure=True, prev_advices=1, next_advices=1):
timezonestring = '+0100'
if is_dst('Europe/Amsterdam'):
timezonestring = '+0200'
url = 'http://webservices.ns.nl/ns-api-treinplanner?'
url = url + 'fromStation=' + start
url = url + '&toStation=' + destination
if via:
url = url + '&via=' + via
if len(timestamp) == 5:
# Format of HH:MM - api needs yyyy-mm-ddThh:mm
timestamp = time.strftime("%Y-%m-%d") + 'T' + timestamp
#requested_time = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M")
# TODO: DST/normal time
requested_time = load_datetime(timestamp + timezonestring, "%Y-%m-%dT%H:%M%z")
else:
#requested_time = datetime.strptime(timestamp, "%d-%m-%Y %H:%M")
requested_time = load_datetime(timestamp + timezonestring, "%d-%m-%Y %H:%M%z")
timestamp = datetime.strptime(timestamp, "%d-%m-%Y %H:%M").strftime("%Y-%m-%dT%H:%M")
url = url + '&previousAdvices=' + str(prev_advices)
url = url + '&nextAdvices=' + str(next_advices)
url = url + '&dateTime=' + timestamp
raw_trips = self._request('GET', url)
return self.parse_trips(raw_trips, requested_time)
|
Fetch trip possibilities for these parameters
http://webservices.ns.nl/ns-api-treinplanner?<parameters>
fromStation
toStation
dateTime: 2012-02-21T15:50
departure: true for starting at timestamp, false for arriving at timestamp
previousAdvices
nextAdvices
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L829-L862
|
[
"def is_dst(zonename):\n \"\"\"\n Find out whether it's Daylight Saving Time in this timezone\n \"\"\"\n tz = pytz.timezone(zonename)\n now = pytz.utc.localize(datetime.utcnow())\n return now.astimezone(tz).dst() != timedelta(0)\n",
"def load_datetime(value, dt_format):\n \"\"\"\n Create timezone-aware datetime object\n \"\"\"\n if dt_format.endswith('%z'):\n dt_format = dt_format[:-2]\n offset = value[-5:]\n value = value[:-5]\n if offset != offset.replace(':', ''):\n # strip : from HHMM if needed (isoformat() adds it between HH and MM)\n offset = '+' + offset.replace(':', '')\n value = value[:-1]\n return OffsetTime(offset).localize(datetime.strptime(value, dt_format))\n\n return datetime.strptime(value, dt_format)\n",
"def _request(self, method, url, postdata=None, params=None):\n headers = {\"Accept\": \"application/xml\",\n \"Content-Type\": \"application/xml\",\n \"User-Agent\": \"ns_api\"}\n\n if postdata:\n postdata = json.dumps(postdata)\n\n r = requests.request(method,\n url,\n data=postdata,\n params=params,\n headers=headers,\n files=None,\n auth=HTTPBasicAuth(self.username, self.apikey))\n\n r.encoding = 'utf-8'\n r.raise_for_status()\n return r.text\n",
"def parse_trips(self, xml, requested_time):\n \"\"\"\n Parse the NS API xml result into Trip objects\n \"\"\"\n obj = xmltodict.parse(xml)\n trips = []\n\n if 'error' in obj:\n print('Error in trips: ' + obj['error']['message'])\n return None\n\n try:\n for trip in obj['ReisMogelijkheden']['ReisMogelijkheid']:\n newtrip = Trip(trip, requested_time)\n trips.append(newtrip)\n except TypeError:\n # If no options are found, obj['ReisMogelijkheden'] is None\n return None\n\n return trips\n"
] |
class NSAPI(object):
"""
NS API object
Library to query the official Dutch railways API
"""
def __init__(self, username, apikey):
self.username = username
self.apikey = apikey
def _request(self, method, url, postdata=None, params=None):
headers = {"Accept": "application/xml",
"Content-Type": "application/xml",
"User-Agent": "ns_api"}
if postdata:
postdata = json.dumps(postdata)
r = requests.request(method,
url,
data=postdata,
params=params,
headers=headers,
files=None,
auth=HTTPBasicAuth(self.username, self.apikey))
r.encoding = 'utf-8'
r.raise_for_status()
return r.text
def parse_disruptions(self, xml):
"""
Parse the NS API xml result into Disruption objects
@param xml: raw XML result from the NS API
"""
obj = xmltodict.parse(xml)
disruptions = {}
disruptions['unplanned'] = []
disruptions['planned'] = []
if obj['Storingen']['Ongepland']:
raw_disruptions = obj['Storingen']['Ongepland']['Storing']
if isinstance(raw_disruptions, collections.OrderedDict):
raw_disruptions = [raw_disruptions]
for disruption in raw_disruptions:
newdis = Disruption(disruption)
#print(newdis.__dict__)
disruptions['unplanned'].append(newdis)
if obj['Storingen']['Gepland']:
raw_disruptions = obj['Storingen']['Gepland']['Storing']
if isinstance(raw_disruptions, collections.OrderedDict):
raw_disruptions = [raw_disruptions]
for disruption in raw_disruptions:
newdis = Disruption(disruption)
#print(newdis.__dict__)
disruptions['planned'].append(newdis)
return disruptions
def get_disruptions(self, station=None, actual=True, unplanned=True):
"""
Fetch the current disruptions, or even the planned ones
@param station: station to lookup
@param actual: only actual disruptions, or a
actuele storingen (=ongeplande storingen + actuele werkzaamheden)
geplande werkzaamheden (=geplande werkzaamheden)
actuele storingen voor een gespecificeerd station (=ongeplande storingen + actuele werkzaamheden)
"""
url = "http://webservices.ns.nl/ns-api-storingen?station=${Stationsnaam}&actual=${true or false}&unplanned=${true or false}"
url = "http://webservices.ns.nl/ns-api-storingen?actual=true&unplanned=true"
raw_disruptions = self._request('GET', url)
return self.parse_disruptions(raw_disruptions)
def parse_departures(self, xml):
"""
Parse the NS API xml result into Departure objects
@param xml: raw XML result from the NS API
"""
obj = xmltodict.parse(xml)
departures = []
for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']:
newdep = Departure(departure)
departures.append(newdep)
#print('-- dep --')
#print(newdep.__dict__)
#print(newdep.to_json())
print(newdep.delay)
return departures
def get_departures(self, station):
"""
Fetch the current departure times from this station
http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
@param station: station to lookup
"""
url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
raw_departures = self._request('GET', url)
return self.parse_departures(raw_departures)
def parse_trips(self, xml, requested_time):
"""
Parse the NS API xml result into Trip objects
"""
obj = xmltodict.parse(xml)
trips = []
if 'error' in obj:
print('Error in trips: ' + obj['error']['message'])
return None
try:
for trip in obj['ReisMogelijkheden']['ReisMogelijkheid']:
newtrip = Trip(trip, requested_time)
trips.append(newtrip)
except TypeError:
# If no options are found, obj['ReisMogelijkheden'] is None
return None
return trips
def parse_stations(self, xml):
obj = xmltodict.parse(xml)
stations = []
for station in obj['Stations']['Station']:
newstat = Station(station)
stations.append(newstat)
print(len(stations))
return stations
def get_stations(self):
"""
Fetch the list of stations
"""
url = 'http://webservices.ns.nl/ns-api-stations-v2'
raw_stations = self._request('GET', url)
return self.parse_stations(raw_stations)
|
aquatix/ns-api
|
ns_api.py
|
NSAPI.get_stations
|
python
|
def get_stations(self):
url = 'http://webservices.ns.nl/ns-api-stations-v2'
raw_stations = self._request('GET', url)
return self.parse_stations(raw_stations)
|
Fetch the list of stations
|
train
|
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L877-L883
|
[
"def _request(self, method, url, postdata=None, params=None):\n headers = {\"Accept\": \"application/xml\",\n \"Content-Type\": \"application/xml\",\n \"User-Agent\": \"ns_api\"}\n\n if postdata:\n postdata = json.dumps(postdata)\n\n r = requests.request(method,\n url,\n data=postdata,\n params=params,\n headers=headers,\n files=None,\n auth=HTTPBasicAuth(self.username, self.apikey))\n\n r.encoding = 'utf-8'\n r.raise_for_status()\n return r.text\n",
"def parse_stations(self, xml):\n obj = xmltodict.parse(xml)\n stations = []\n\n for station in obj['Stations']['Station']:\n newstat = Station(station)\n stations.append(newstat)\n\n print(len(stations))\n return stations\n"
] |
class NSAPI(object):
"""
NS API object
Library to query the official Dutch railways API
"""
def __init__(self, username, apikey):
self.username = username
self.apikey = apikey
def _request(self, method, url, postdata=None, params=None):
headers = {"Accept": "application/xml",
"Content-Type": "application/xml",
"User-Agent": "ns_api"}
if postdata:
postdata = json.dumps(postdata)
r = requests.request(method,
url,
data=postdata,
params=params,
headers=headers,
files=None,
auth=HTTPBasicAuth(self.username, self.apikey))
r.encoding = 'utf-8'
r.raise_for_status()
return r.text
def parse_disruptions(self, xml):
"""
Parse the NS API xml result into Disruption objects
@param xml: raw XML result from the NS API
"""
obj = xmltodict.parse(xml)
disruptions = {}
disruptions['unplanned'] = []
disruptions['planned'] = []
if obj['Storingen']['Ongepland']:
raw_disruptions = obj['Storingen']['Ongepland']['Storing']
if isinstance(raw_disruptions, collections.OrderedDict):
raw_disruptions = [raw_disruptions]
for disruption in raw_disruptions:
newdis = Disruption(disruption)
#print(newdis.__dict__)
disruptions['unplanned'].append(newdis)
if obj['Storingen']['Gepland']:
raw_disruptions = obj['Storingen']['Gepland']['Storing']
if isinstance(raw_disruptions, collections.OrderedDict):
raw_disruptions = [raw_disruptions]
for disruption in raw_disruptions:
newdis = Disruption(disruption)
#print(newdis.__dict__)
disruptions['planned'].append(newdis)
return disruptions
def get_disruptions(self, station=None, actual=True, unplanned=True):
"""
Fetch the current disruptions, or even the planned ones
@param station: station to lookup
@param actual: only actual disruptions, or a
actuele storingen (=ongeplande storingen + actuele werkzaamheden)
geplande werkzaamheden (=geplande werkzaamheden)
actuele storingen voor een gespecificeerd station (=ongeplande storingen + actuele werkzaamheden)
"""
url = "http://webservices.ns.nl/ns-api-storingen?station=${Stationsnaam}&actual=${true or false}&unplanned=${true or false}"
url = "http://webservices.ns.nl/ns-api-storingen?actual=true&unplanned=true"
raw_disruptions = self._request('GET', url)
return self.parse_disruptions(raw_disruptions)
def parse_departures(self, xml):
"""
Parse the NS API xml result into Departure objects
@param xml: raw XML result from the NS API
"""
obj = xmltodict.parse(xml)
departures = []
for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']:
newdep = Departure(departure)
departures.append(newdep)
#print('-- dep --')
#print(newdep.__dict__)
#print(newdep.to_json())
print(newdep.delay)
return departures
def get_departures(self, station):
"""
Fetch the current departure times from this station
http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
@param station: station to lookup
"""
url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
raw_departures = self._request('GET', url)
return self.parse_departures(raw_departures)
def parse_trips(self, xml, requested_time):
"""
Parse the NS API xml result into Trip objects
"""
obj = xmltodict.parse(xml)
trips = []
if 'error' in obj:
print('Error in trips: ' + obj['error']['message'])
return None
try:
for trip in obj['ReisMogelijkheden']['ReisMogelijkheid']:
newtrip = Trip(trip, requested_time)
trips.append(newtrip)
except TypeError:
# If no options are found, obj['ReisMogelijkheden'] is None
return None
return trips
def get_trips(self, timestamp, start, via, destination, departure=True, prev_advices=1, next_advices=1):
"""
Fetch trip possibilities for these parameters
http://webservices.ns.nl/ns-api-treinplanner?<parameters>
fromStation
toStation
dateTime: 2012-02-21T15:50
departure: true for starting at timestamp, false for arriving at timestamp
previousAdvices
nextAdvices
"""
timezonestring = '+0100'
if is_dst('Europe/Amsterdam'):
timezonestring = '+0200'
url = 'http://webservices.ns.nl/ns-api-treinplanner?'
url = url + 'fromStation=' + start
url = url + '&toStation=' + destination
if via:
url = url + '&via=' + via
if len(timestamp) == 5:
# Format of HH:MM - api needs yyyy-mm-ddThh:mm
timestamp = time.strftime("%Y-%m-%d") + 'T' + timestamp
#requested_time = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M")
# TODO: DST/normal time
requested_time = load_datetime(timestamp + timezonestring, "%Y-%m-%dT%H:%M%z")
else:
#requested_time = datetime.strptime(timestamp, "%d-%m-%Y %H:%M")
requested_time = load_datetime(timestamp + timezonestring, "%d-%m-%Y %H:%M%z")
timestamp = datetime.strptime(timestamp, "%d-%m-%Y %H:%M").strftime("%Y-%m-%dT%H:%M")
url = url + '&previousAdvices=' + str(prev_advices)
url = url + '&nextAdvices=' + str(next_advices)
url = url + '&dateTime=' + timestamp
raw_trips = self._request('GET', url)
return self.parse_trips(raw_trips, requested_time)
def parse_stations(self, xml):
obj = xmltodict.parse(xml)
stations = []
for station in obj['Stations']['Station']:
newstat = Station(station)
stations.append(newstat)
print(len(stations))
return stations
|
thespacedoctor/astrocalc
|
astrocalc/__init__.py
|
luminosity_to_flux
|
python
|
def luminosity_to_flux(lumErg_S, dist_Mpc):
################ > IMPORTS ################
## STANDARD LIB ##
## THIRD PARTY ##
import numpy as np
import math
## LOCAL APPLICATION ##
################ > VARIABLE SETTINGS ######
################ >ACTION(S) ################
# Convert the distance to cm
distCm = dist_Mpc * MPC_2_CMS
fluxErg_cm2_S = lumErg_S / (4 * np.pi * distCm ** 2)
return fluxErg_cm2_S
|
*Convert luminosity to a flux*
**Key Arguments:**
- ``lumErg_S`` -- luminosity in ergs/sec
- ``dist_Mpc`` -- distance in Mpc
**Return:**
- ``fluxErg_cm2_S`` -- flux in ergs/cm2/s
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/__init__.py#L3-L28
| null |
##########################################################################
# PRIVATE (HELPER) FUNCTIONS #
##########################################################################
if __name__ == '__main__':
main()
|
thespacedoctor/astrocalc
|
astrocalc/times/now.py
|
now.get_mjd
|
python
|
def get_mjd(self):
self.log.info('starting the ``get_mjd`` method')
jd = time.time() / 86400.0 + 2440587.5
mjd = jd - 2400000.5
self.log.info('completed the ``get_mjd`` method')
return mjd
|
*Get the current time as an MJD*
**Return:**
- ``mjd`` -- the current MJD as a float
**Usage:**
.. todo::
- add clutil
- remove `getCurrentMJD` from all other code
.. code-block:: python
from astrocalc.times import now
mjd = now(
log=log
).get_mjd()
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/times/now.py#L47-L74
| null |
class now():
"""
*Report the current time into various formats*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
"""
# Initialisation
def __init__(
self,
log,
settings=False,
):
self.log = log
log.debug("instansiating a new 'now' object")
self.settings = settings
# xt-self-arg-tmpx
# Initial Actions
return None
|
thespacedoctor/astrocalc
|
astrocalc/coords/unit_conversion.py
|
unit_conversion.get
|
python
|
def get(self):
self.log.info('starting the ``get`` method')
unit_conversion = None
self.log.info('completed the ``get`` method')
return unit_conversion
|
*get the unit_conversion object*
**Return:**
- ``unit_conversion``
.. todo::
- @review: when complete, clean get method
- @review: when complete add logging
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/coords/unit_conversion.py#L71-L88
| null |
class unit_conversion():
"""
*The worker class for the unit_conversion module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary (prob not required)
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- add ra_sexegesimal_to_decimal
.. code-block:: python
usage code
.. todo::
- @review: when complete, clean unit_conversion class
- @review: when complete add logging
- @review: when complete, decide whether to abstract class to another module
"""
# Initialisation
# 1. @flagged: what are the unique attrributes for each object? Add them
# to __init__
def __init__(
self,
log,
settings=False
):
self.log = log
log.debug("instansiating a new 'unit_conversion' object")
self.settings = settings
# xt-self-arg-tmpx
# 2. @flagged: what are the default attrributes each object could have? Add them to variable attribute set here
# Variable Data Atrributes
# 3. @flagged: what variable attrributes need overriden in any baseclass(es) used
# Override Variable Data Atrributes
# Initial Actions
return None
# 4. @flagged: what actions does each object have to be able to perform? Add them here
# Method Attributes
def dec_sexegesimal_to_decimal(
self,
dec):
"""
*Convert a declination from sexegesimal format to decimal degrees.*
Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).
The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly:
- ``+1:58:05.45341``
- ``01:5:05``
- ``+1 58 05.45341``
- ``-23h53m05s``
**Key Arguments:**
- ``dec`` - DEC in sexegesimal format.
**Return:**
- ``decDeg`` -- declination converted to decimal degrees
**Usage:**
.. todo::
- replace dryxPython declination_sexegesimal_to_decimal with this version in all my code
- replace coords_sex_to_dec in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_sexegesimal_to_decimal(
dec="-23:45:21.23232"
)
print dec
# OUTPUT: -23.7558978667
"""
self.log.info(
'starting the ``dec_sexegesimal_to_decimal`` method')
import re
# TEST TO SEE IF DECIMAL DEGREES PASSED
try:
dec = float(dec)
if dec > -90. and dec < 90.:
self.log.info(
'declination seems to already be in decimal degrees, returning original value' % locals())
return float(dec)
except:
pass
# REMOVE SURROUNDING WHITESPACE
dec = str(dec).strip()
# LOOK FOR A MINUS SIGN. NOTE THAT -00 IS THE SAME AS 00.
regex = re.compile(
'^([\+\-]?(\d|[0-8]\d))\D+([0-5]\d)\D+([0-6]?\d(\.\d+)?)$')
decMatch = regex.match(dec)
if decMatch:
degrees = decMatch.group(1)
minutes = decMatch.group(3)
seconds = decMatch.group(4)
if degrees[0] == '-':
sgn = -1
else:
sgn = 1
degrees = abs(float(degrees))
minutes = float(minutes)
seconds = float(seconds)
# PRECISION TEST
# 1s = .000277778 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 4
decimalLen = len(repr(seconds).split(".")[-1])
precision = decimalLen + 4
decDeg = (degrees + (minutes / 60.0)
+ (seconds / 3600.0)) * sgn
decDeg = "%0.*f" % (precision, decDeg)
else:
raise IOError(
"could not convert dec to decimal degrees, could not parse sexegesimal input. Original value was `%(dec)s`" % locals())
decDeg = float(decDeg)
self.log.debug('decDeg: %(decDeg)s' % locals())
self.log.info(
'completed the ``dec_sexegesimal_to_decimal`` method')
return float(decDeg)
def ra_sexegesimal_to_decimal(
self,
ra
):
"""
*Convert a right-ascension from sexegesimal format to decimal degrees.*
Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).
The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly
- ``23:45:21.23232``
- ``23h45m21.23232s``
- ``23 45 21.23232``
- ``2 04 21.23232``
- ``04:45 21``
**Key Arguments:**
- ``ra`` -- ra in sexegesimal units
**Return:**
- ``decimalDegrees``
**Usage:**
.. code-block:: python
- replace dryxPython ra_sexegesimal_to_decimal with this version in all my code
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
ra = converter.ra_sexegesimal_to_decimal(
ra="04:45 21"
)
print ra
# OUTPUT: 71.3375
"""
import re
# TEST TO SEE IF DECIMAL DEGREES PASSED
try:
ra = float(ra)
if ra >= 0. and ra <= 360.:
self.log.info(
'RA seems to already be in decimal degrees, returning original value' % locals())
return float(ra)
except:
pass
# REMOVE SURROUNDING WHITESPACE
ra = str(ra).strip()
regex = re.compile(
'^(\+?(\d|[0-1]\d|2[0-3]))\D+([0-5]\d)\D+([0-6]?\d(\.\d*?)?)(s)?\s*?$')
raMatch = regex.match(ra)
if raMatch:
degrees = raMatch.group(1)
minutes = raMatch.group(3)
seconds = raMatch.group(4)
degrees = abs(float(degrees)) * 15.0
minutes = float(minutes) * 15.0
seconds = float(seconds) * 15.0
# PRECISION TEST
# 1s ARCSEC = .000018519 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 5
decimalLen = len(repr(seconds).split(".")[-1])
precision = decimalLen + 5
decimalDegrees = (degrees + (minutes / 60.0)
+ (seconds / 3600.0))
decimalDegrees = "%0.*f" % (precision, decimalDegrees)
else:
raise IOError(
"could not convert ra to decimal degrees, could not parse sexegesimal input. Original value was `%(ra)s`" % locals())
raDeg = decimalDegrees
self.log.debug('raDeg: %(decimalDegrees)s' % locals())
self.log.info(
'completed the ``ra_sexegesimal_to_decimal`` method')
return float(raDeg)
def ra_decimal_to_sexegesimal(
self,
ra,
delimiter=":"):
"""
*Convert a right-ascension between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``ra`` -- RA in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace ra_to_sex from dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
ra = converter.ra_decimal_to_sexegesimal(
ra="-23.454676456",
delimiter=":"
)
print ra
# OUT: 22:26:10.87
"""
self.log.info('starting the ``ra_decimal_to_sexegesimal`` method')
# CONVERT RA TO FLOAT
try:
self.log.debug("attempting to convert RA to float")
ra = float(ra)
except Exception, e:
self.log.error(
"could not convert RA to float - failed with this error: %s " % (str(e),))
return -1
# COMPLAIN IF RA NOT BETWEEN -360 - 360
if ra > 0. and ra < 360.:
pass
elif ra < 0 and ra > -360.:
ra = 360. + ra
else:
self.log.error(
"RA must be between 0 - 360 degrees")
return -1
# PRECISION TEST
# 1s ARCSEC = .000018519 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 5
decimalLen = len(repr(ra).split(".")[-1])
precision = decimalLen - 5
# CALCULATION FROM DECIMAL DEGREES
import math
ra_hh = int(ra / 15)
ra_mm = int((ra / 15 - ra_hh) * 60)
ra_ss = int(((ra / 15 - ra_hh) * 60 - ra_mm) * 60)
ra_ff = ((ra / 15 - ra_hh) * 60 - ra_mm) * 60 - ra_ss
# SET PRECISION
ra_ff = repr(ra_ff)[2:]
ra_ff = ra_ff[:precision]
if len(ra_ff):
ra_ff = "." + ra_ff
if precision < 0:
ra_ff = ""
sexegesimal = '%02d' % ra_hh + delimiter + '%02d' % ra_mm + \
delimiter + '%02d' % ra_ss + ra_ff
self.log.info('completed the ``ra_decimal_to_sexegesimal`` method')
return sexegesimal
def dec_decimal_to_sexegesimal(
self,
dec,
delimiter=":"):
"""
*Convert a declination between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``dec`` -- DEC in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace dec_to_sex in dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_decimal_to_sexegesimal(
dec="-3.454676456",
delimiter=":"
)
print dec
# OUT: -03:27:16.8
"""
self.log.info('starting the ``dec_decimal_to_sexegesimal`` method')
import math
# CONVERT DEC TO FLOAT
try:
self.log.debug("attempting to convert RA to float")
dec = float(dec)
except Exception, e:
self.log.error(
"could not convert RA to float - failed with this error: %s " % (str(e),))
return -1
# COMPLAIN IF DEC NOT BETWEEN -90 - 90
if dec > -90. and dec < 90.:
pass
else:
self.log.error(
"DEC must be between -90 - 90 degrees")
return -1
if (dec >= 0):
hemisphere = '+'
else:
hemisphere = '-'
dec *= -1
# PRECISION TEST
# 1s = .000277778 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 4
decimalLen = len(repr(dec).split(".")[-1])
precision = decimalLen - 4
dec_deg = int(dec)
dec_mm = int((dec - dec_deg) * 60)
dec_ss = int(((dec - dec_deg) * 60 - dec_mm) * 60)
dec_f = (((dec - dec_deg) * 60 - dec_mm) * 60) - dec_ss
# SET PRECISION
dec_f = repr(dec_f)[2:]
dec_f = dec_f[:precision]
if len(dec_f):
dec_f = "." + dec_f
if precision < 0:
dec_f = ""
sexegesimal = hemisphere + '%02d' % dec_deg + delimiter + \
'%02d' % dec_mm + delimiter + '%02d' % dec_ss + dec_f
self.log.info('completed the ``dec_decimal_to_sexegesimal`` method')
return sexegesimal
# use the tab-trigger below for new method
def ra_dec_to_cartesian(
self,
ra,
dec):
"""*Convert an RA, DEC coordinate set to x, y, z cartesian coordinates*
**Key Arguments:**
- ``ra`` -- right ascension in sexegesimal or decimal degress.
- ``dec`` -- declination in sexegesimal or decimal degress.
**Return:**
- ``cartesians`` -- tuple of (x, y, z) coordinates
.. todo::
- replace calculate_cartesians in all code
**Usage:**
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
x, y, z = converter.ra_dec_to_cartesian(
ra="23 45 21.23232",
dec="+01:58:5.45341"
)
print x, y, z
# OUTPUT: 0.9973699780687104, -0.06382462462791459, 0.034344492110465606
"""
self.log.info('starting the ``ra_dec_to_cartesian`` method')
ra = self.ra_sexegesimal_to_decimal(
ra=ra
)
dec = self.dec_sexegesimal_to_decimal(
dec=dec
)
ra = math.radians(ra)
dec = math.radians(dec)
cos_dec = math.cos(dec)
cx = math.cos(ra) * cos_dec
cy = math.sin(ra) * cos_dec
cz = math.sin(dec)
cartesians = (cx, cy, cz)
self.log.info('completed the ``ra_dec_to_cartesian`` method')
return cartesians
|
thespacedoctor/astrocalc
|
astrocalc/coords/unit_conversion.py
|
unit_conversion.dec_sexegesimal_to_decimal
|
python
|
def dec_sexegesimal_to_decimal(
self,
dec):
self.log.info(
'starting the ``dec_sexegesimal_to_decimal`` method')
import re
# TEST TO SEE IF DECIMAL DEGREES PASSED
try:
dec = float(dec)
if dec > -90. and dec < 90.:
self.log.info(
'declination seems to already be in decimal degrees, returning original value' % locals())
return float(dec)
except:
pass
# REMOVE SURROUNDING WHITESPACE
dec = str(dec).strip()
# LOOK FOR A MINUS SIGN. NOTE THAT -00 IS THE SAME AS 00.
regex = re.compile(
'^([\+\-]?(\d|[0-8]\d))\D+([0-5]\d)\D+([0-6]?\d(\.\d+)?)$')
decMatch = regex.match(dec)
if decMatch:
degrees = decMatch.group(1)
minutes = decMatch.group(3)
seconds = decMatch.group(4)
if degrees[0] == '-':
sgn = -1
else:
sgn = 1
degrees = abs(float(degrees))
minutes = float(minutes)
seconds = float(seconds)
# PRECISION TEST
# 1s = .000277778 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 4
decimalLen = len(repr(seconds).split(".")[-1])
precision = decimalLen + 4
decDeg = (degrees + (minutes / 60.0)
+ (seconds / 3600.0)) * sgn
decDeg = "%0.*f" % (precision, decDeg)
else:
raise IOError(
"could not convert dec to decimal degrees, could not parse sexegesimal input. Original value was `%(dec)s`" % locals())
decDeg = float(decDeg)
self.log.debug('decDeg: %(decDeg)s' % locals())
self.log.info(
'completed the ``dec_sexegesimal_to_decimal`` method')
return float(decDeg)
|
*Convert a declination from sexegesimal format to decimal degrees.*
Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).
The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly:
- ``+1:58:05.45341``
- ``01:5:05``
- ``+1 58 05.45341``
- ``-23h53m05s``
**Key Arguments:**
- ``dec`` - DEC in sexegesimal format.
**Return:**
- ``decDeg`` -- declination converted to decimal degrees
**Usage:**
.. todo::
- replace dryxPython declination_sexegesimal_to_decimal with this version in all my code
- replace coords_sex_to_dec in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_sexegesimal_to_decimal(
dec="-23:45:21.23232"
)
print dec
# OUTPUT: -23.7558978667
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/coords/unit_conversion.py#L90-L188
| null |
class unit_conversion():
"""
*The worker class for the unit_conversion module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary (prob not required)
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- add ra_sexegesimal_to_decimal
.. code-block:: python
usage code
.. todo::
- @review: when complete, clean unit_conversion class
- @review: when complete add logging
- @review: when complete, decide whether to abstract class to another module
"""
# Initialisation
# 1. @flagged: what are the unique attrributes for each object? Add them
# to __init__
def __init__(
self,
log,
settings=False
):
self.log = log
log.debug("instansiating a new 'unit_conversion' object")
self.settings = settings
# xt-self-arg-tmpx
# 2. @flagged: what are the default attrributes each object could have? Add them to variable attribute set here
# Variable Data Atrributes
# 3. @flagged: what variable attrributes need overriden in any baseclass(es) used
# Override Variable Data Atrributes
# Initial Actions
return None
# 4. @flagged: what actions does each object have to be able to perform? Add them here
# Method Attributes
def get(self):
"""
*get the unit_conversion object*
**Return:**
- ``unit_conversion``
.. todo::
- @review: when complete, clean get method
- @review: when complete add logging
"""
self.log.info('starting the ``get`` method')
unit_conversion = None
self.log.info('completed the ``get`` method')
return unit_conversion
def ra_sexegesimal_to_decimal(
self,
ra
):
"""
*Convert a right-ascension from sexegesimal format to decimal degrees.*
Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).
The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly
- ``23:45:21.23232``
- ``23h45m21.23232s``
- ``23 45 21.23232``
- ``2 04 21.23232``
- ``04:45 21``
**Key Arguments:**
- ``ra`` -- ra in sexegesimal units
**Return:**
- ``decimalDegrees``
**Usage:**
.. code-block:: python
- replace dryxPython ra_sexegesimal_to_decimal with this version in all my code
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
ra = converter.ra_sexegesimal_to_decimal(
ra="04:45 21"
)
print ra
# OUTPUT: 71.3375
"""
import re
# TEST TO SEE IF DECIMAL DEGREES PASSED
try:
ra = float(ra)
if ra >= 0. and ra <= 360.:
self.log.info(
'RA seems to already be in decimal degrees, returning original value' % locals())
return float(ra)
except:
pass
# REMOVE SURROUNDING WHITESPACE
ra = str(ra).strip()
regex = re.compile(
'^(\+?(\d|[0-1]\d|2[0-3]))\D+([0-5]\d)\D+([0-6]?\d(\.\d*?)?)(s)?\s*?$')
raMatch = regex.match(ra)
if raMatch:
degrees = raMatch.group(1)
minutes = raMatch.group(3)
seconds = raMatch.group(4)
degrees = abs(float(degrees)) * 15.0
minutes = float(minutes) * 15.0
seconds = float(seconds) * 15.0
# PRECISION TEST
# 1s ARCSEC = .000018519 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 5
decimalLen = len(repr(seconds).split(".")[-1])
precision = decimalLen + 5
decimalDegrees = (degrees + (minutes / 60.0)
+ (seconds / 3600.0))
decimalDegrees = "%0.*f" % (precision, decimalDegrees)
else:
raise IOError(
"could not convert ra to decimal degrees, could not parse sexegesimal input. Original value was `%(ra)s`" % locals())
raDeg = decimalDegrees
self.log.debug('raDeg: %(decimalDegrees)s' % locals())
self.log.info(
'completed the ``ra_sexegesimal_to_decimal`` method')
return float(raDeg)
def ra_decimal_to_sexegesimal(
self,
ra,
delimiter=":"):
"""
*Convert a right-ascension between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``ra`` -- RA in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace ra_to_sex from dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
ra = converter.ra_decimal_to_sexegesimal(
ra="-23.454676456",
delimiter=":"
)
print ra
# OUT: 22:26:10.87
"""
self.log.info('starting the ``ra_decimal_to_sexegesimal`` method')
# CONVERT RA TO FLOAT
try:
self.log.debug("attempting to convert RA to float")
ra = float(ra)
except Exception, e:
self.log.error(
"could not convert RA to float - failed with this error: %s " % (str(e),))
return -1
# COMPLAIN IF RA NOT BETWEEN -360 - 360
if ra > 0. and ra < 360.:
pass
elif ra < 0 and ra > -360.:
ra = 360. + ra
else:
self.log.error(
"RA must be between 0 - 360 degrees")
return -1
# PRECISION TEST
# 1s ARCSEC = .000018519 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 5
decimalLen = len(repr(ra).split(".")[-1])
precision = decimalLen - 5
# CALCULATION FROM DECIMAL DEGREES
import math
ra_hh = int(ra / 15)
ra_mm = int((ra / 15 - ra_hh) * 60)
ra_ss = int(((ra / 15 - ra_hh) * 60 - ra_mm) * 60)
ra_ff = ((ra / 15 - ra_hh) * 60 - ra_mm) * 60 - ra_ss
# SET PRECISION
ra_ff = repr(ra_ff)[2:]
ra_ff = ra_ff[:precision]
if len(ra_ff):
ra_ff = "." + ra_ff
if precision < 0:
ra_ff = ""
sexegesimal = '%02d' % ra_hh + delimiter + '%02d' % ra_mm + \
delimiter + '%02d' % ra_ss + ra_ff
self.log.info('completed the ``ra_decimal_to_sexegesimal`` method')
return sexegesimal
def dec_decimal_to_sexegesimal(
self,
dec,
delimiter=":"):
"""
*Convert a declination between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``dec`` -- DEC in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace dec_to_sex in dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_decimal_to_sexegesimal(
dec="-3.454676456",
delimiter=":"
)
print dec
# OUT: -03:27:16.8
"""
self.log.info('starting the ``dec_decimal_to_sexegesimal`` method')
import math
# CONVERT DEC TO FLOAT
try:
self.log.debug("attempting to convert RA to float")
dec = float(dec)
except Exception, e:
self.log.error(
"could not convert RA to float - failed with this error: %s " % (str(e),))
return -1
# COMPLAIN IF DEC NOT BETWEEN -90 - 90
if dec > -90. and dec < 90.:
pass
else:
self.log.error(
"DEC must be between -90 - 90 degrees")
return -1
if (dec >= 0):
hemisphere = '+'
else:
hemisphere = '-'
dec *= -1
# PRECISION TEST
# 1s = .000277778 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 4
decimalLen = len(repr(dec).split(".")[-1])
precision = decimalLen - 4
dec_deg = int(dec)
dec_mm = int((dec - dec_deg) * 60)
dec_ss = int(((dec - dec_deg) * 60 - dec_mm) * 60)
dec_f = (((dec - dec_deg) * 60 - dec_mm) * 60) - dec_ss
# SET PRECISION
dec_f = repr(dec_f)[2:]
dec_f = dec_f[:precision]
if len(dec_f):
dec_f = "." + dec_f
if precision < 0:
dec_f = ""
sexegesimal = hemisphere + '%02d' % dec_deg + delimiter + \
'%02d' % dec_mm + delimiter + '%02d' % dec_ss + dec_f
self.log.info('completed the ``dec_decimal_to_sexegesimal`` method')
return sexegesimal
# use the tab-trigger below for new method
def ra_dec_to_cartesian(
self,
ra,
dec):
"""*Convert an RA, DEC coordinate set to x, y, z cartesian coordinates*
**Key Arguments:**
- ``ra`` -- right ascension in sexegesimal or decimal degress.
- ``dec`` -- declination in sexegesimal or decimal degress.
**Return:**
- ``cartesians`` -- tuple of (x, y, z) coordinates
.. todo::
- replace calculate_cartesians in all code
**Usage:**
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
x, y, z = converter.ra_dec_to_cartesian(
ra="23 45 21.23232",
dec="+01:58:5.45341"
)
print x, y, z
# OUTPUT: 0.9973699780687104, -0.06382462462791459, 0.034344492110465606
"""
self.log.info('starting the ``ra_dec_to_cartesian`` method')
ra = self.ra_sexegesimal_to_decimal(
ra=ra
)
dec = self.dec_sexegesimal_to_decimal(
dec=dec
)
ra = math.radians(ra)
dec = math.radians(dec)
cos_dec = math.cos(dec)
cx = math.cos(ra) * cos_dec
cy = math.sin(ra) * cos_dec
cz = math.sin(dec)
cartesians = (cx, cy, cz)
self.log.info('completed the ``ra_dec_to_cartesian`` method')
return cartesians
|
thespacedoctor/astrocalc
|
astrocalc/coords/unit_conversion.py
|
unit_conversion.ra_sexegesimal_to_decimal
|
python
|
def ra_sexegesimal_to_decimal(
self,
ra
):
import re
# TEST TO SEE IF DECIMAL DEGREES PASSED
try:
ra = float(ra)
if ra >= 0. and ra <= 360.:
self.log.info(
'RA seems to already be in decimal degrees, returning original value' % locals())
return float(ra)
except:
pass
# REMOVE SURROUNDING WHITESPACE
ra = str(ra).strip()
regex = re.compile(
'^(\+?(\d|[0-1]\d|2[0-3]))\D+([0-5]\d)\D+([0-6]?\d(\.\d*?)?)(s)?\s*?$')
raMatch = regex.match(ra)
if raMatch:
degrees = raMatch.group(1)
minutes = raMatch.group(3)
seconds = raMatch.group(4)
degrees = abs(float(degrees)) * 15.0
minutes = float(minutes) * 15.0
seconds = float(seconds) * 15.0
# PRECISION TEST
# 1s ARCSEC = .000018519 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 5
decimalLen = len(repr(seconds).split(".")[-1])
precision = decimalLen + 5
decimalDegrees = (degrees + (minutes / 60.0)
+ (seconds / 3600.0))
decimalDegrees = "%0.*f" % (precision, decimalDegrees)
else:
raise IOError(
"could not convert ra to decimal degrees, could not parse sexegesimal input. Original value was `%(ra)s`" % locals())
raDeg = decimalDegrees
self.log.debug('raDeg: %(decimalDegrees)s' % locals())
self.log.info(
'completed the ``ra_sexegesimal_to_decimal`` method')
return float(raDeg)
|
*Convert a right-ascension from sexegesimal format to decimal degrees.*
Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).
The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly
- ``23:45:21.23232``
- ``23h45m21.23232s``
- ``23 45 21.23232``
- ``2 04 21.23232``
- ``04:45 21``
**Key Arguments:**
- ``ra`` -- ra in sexegesimal units
**Return:**
- ``decimalDegrees``
**Usage:**
.. code-block:: python
- replace dryxPython ra_sexegesimal_to_decimal with this version in all my code
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
ra = converter.ra_sexegesimal_to_decimal(
ra="04:45 21"
)
print ra
# OUTPUT: 71.3375
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/coords/unit_conversion.py#L190-L278
| null |
class unit_conversion():
"""
*The worker class for the unit_conversion module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary (prob not required)
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- add ra_sexegesimal_to_decimal
.. code-block:: python
usage code
.. todo::
- @review: when complete, clean unit_conversion class
- @review: when complete add logging
- @review: when complete, decide whether to abstract class to another module
"""
# Initialisation
# 1. @flagged: what are the unique attrributes for each object? Add them
# to __init__
def __init__(
self,
log,
settings=False
):
self.log = log
log.debug("instansiating a new 'unit_conversion' object")
self.settings = settings
# xt-self-arg-tmpx
# 2. @flagged: what are the default attrributes each object could have? Add them to variable attribute set here
# Variable Data Atrributes
# 3. @flagged: what variable attrributes need overriden in any baseclass(es) used
# Override Variable Data Atrributes
# Initial Actions
return None
# 4. @flagged: what actions does each object have to be able to perform? Add them here
# Method Attributes
def get(self):
"""
*get the unit_conversion object*
**Return:**
- ``unit_conversion``
.. todo::
- @review: when complete, clean get method
- @review: when complete add logging
"""
self.log.info('starting the ``get`` method')
unit_conversion = None
self.log.info('completed the ``get`` method')
return unit_conversion
def dec_sexegesimal_to_decimal(
self,
dec):
"""
*Convert a declination from sexegesimal format to decimal degrees.*
Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).
The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly:
- ``+1:58:05.45341``
- ``01:5:05``
- ``+1 58 05.45341``
- ``-23h53m05s``
**Key Arguments:**
- ``dec`` - DEC in sexegesimal format.
**Return:**
- ``decDeg`` -- declination converted to decimal degrees
**Usage:**
.. todo::
- replace dryxPython declination_sexegesimal_to_decimal with this version in all my code
- replace coords_sex_to_dec in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_sexegesimal_to_decimal(
dec="-23:45:21.23232"
)
print dec
# OUTPUT: -23.7558978667
"""
self.log.info(
'starting the ``dec_sexegesimal_to_decimal`` method')
import re
# TEST TO SEE IF DECIMAL DEGREES PASSED
try:
dec = float(dec)
if dec > -90. and dec < 90.:
self.log.info(
'declination seems to already be in decimal degrees, returning original value' % locals())
return float(dec)
except:
pass
# REMOVE SURROUNDING WHITESPACE
dec = str(dec).strip()
# LOOK FOR A MINUS SIGN. NOTE THAT -00 IS THE SAME AS 00.
regex = re.compile(
'^([\+\-]?(\d|[0-8]\d))\D+([0-5]\d)\D+([0-6]?\d(\.\d+)?)$')
decMatch = regex.match(dec)
if decMatch:
degrees = decMatch.group(1)
minutes = decMatch.group(3)
seconds = decMatch.group(4)
if degrees[0] == '-':
sgn = -1
else:
sgn = 1
degrees = abs(float(degrees))
minutes = float(minutes)
seconds = float(seconds)
# PRECISION TEST
# 1s = .000277778 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 4
decimalLen = len(repr(seconds).split(".")[-1])
precision = decimalLen + 4
decDeg = (degrees + (minutes / 60.0)
+ (seconds / 3600.0)) * sgn
decDeg = "%0.*f" % (precision, decDeg)
else:
raise IOError(
"could not convert dec to decimal degrees, could not parse sexegesimal input. Original value was `%(dec)s`" % locals())
decDeg = float(decDeg)
self.log.debug('decDeg: %(decDeg)s' % locals())
self.log.info(
'completed the ``dec_sexegesimal_to_decimal`` method')
return float(decDeg)
def ra_decimal_to_sexegesimal(
self,
ra,
delimiter=":"):
"""
*Convert a right-ascension between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``ra`` -- RA in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace ra_to_sex from dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
ra = converter.ra_decimal_to_sexegesimal(
ra="-23.454676456",
delimiter=":"
)
print ra
# OUT: 22:26:10.87
"""
self.log.info('starting the ``ra_decimal_to_sexegesimal`` method')
# CONVERT RA TO FLOAT
try:
self.log.debug("attempting to convert RA to float")
ra = float(ra)
except Exception, e:
self.log.error(
"could not convert RA to float - failed with this error: %s " % (str(e),))
return -1
# COMPLAIN IF RA NOT BETWEEN -360 - 360
if ra > 0. and ra < 360.:
pass
elif ra < 0 and ra > -360.:
ra = 360. + ra
else:
self.log.error(
"RA must be between 0 - 360 degrees")
return -1
# PRECISION TEST
# 1s ARCSEC = .000018519 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 5
decimalLen = len(repr(ra).split(".")[-1])
precision = decimalLen - 5
# CALCULATION FROM DECIMAL DEGREES
import math
ra_hh = int(ra / 15)
ra_mm = int((ra / 15 - ra_hh) * 60)
ra_ss = int(((ra / 15 - ra_hh) * 60 - ra_mm) * 60)
ra_ff = ((ra / 15 - ra_hh) * 60 - ra_mm) * 60 - ra_ss
# SET PRECISION
ra_ff = repr(ra_ff)[2:]
ra_ff = ra_ff[:precision]
if len(ra_ff):
ra_ff = "." + ra_ff
if precision < 0:
ra_ff = ""
sexegesimal = '%02d' % ra_hh + delimiter + '%02d' % ra_mm + \
delimiter + '%02d' % ra_ss + ra_ff
self.log.info('completed the ``ra_decimal_to_sexegesimal`` method')
return sexegesimal
def dec_decimal_to_sexegesimal(
self,
dec,
delimiter=":"):
"""
*Convert a declination between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``dec`` -- DEC in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace dec_to_sex in dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_decimal_to_sexegesimal(
dec="-3.454676456",
delimiter=":"
)
print dec
# OUT: -03:27:16.8
"""
self.log.info('starting the ``dec_decimal_to_sexegesimal`` method')
import math
# CONVERT DEC TO FLOAT
try:
self.log.debug("attempting to convert RA to float")
dec = float(dec)
except Exception, e:
self.log.error(
"could not convert RA to float - failed with this error: %s " % (str(e),))
return -1
# COMPLAIN IF DEC NOT BETWEEN -90 - 90
if dec > -90. and dec < 90.:
pass
else:
self.log.error(
"DEC must be between -90 - 90 degrees")
return -1
if (dec >= 0):
hemisphere = '+'
else:
hemisphere = '-'
dec *= -1
# PRECISION TEST
# 1s = .000277778 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 4
decimalLen = len(repr(dec).split(".")[-1])
precision = decimalLen - 4
dec_deg = int(dec)
dec_mm = int((dec - dec_deg) * 60)
dec_ss = int(((dec - dec_deg) * 60 - dec_mm) * 60)
dec_f = (((dec - dec_deg) * 60 - dec_mm) * 60) - dec_ss
# SET PRECISION
dec_f = repr(dec_f)[2:]
dec_f = dec_f[:precision]
if len(dec_f):
dec_f = "." + dec_f
if precision < 0:
dec_f = ""
sexegesimal = hemisphere + '%02d' % dec_deg + delimiter + \
'%02d' % dec_mm + delimiter + '%02d' % dec_ss + dec_f
self.log.info('completed the ``dec_decimal_to_sexegesimal`` method')
return sexegesimal
# use the tab-trigger below for new method
def ra_dec_to_cartesian(
self,
ra,
dec):
"""*Convert an RA, DEC coordinate set to x, y, z cartesian coordinates*
**Key Arguments:**
- ``ra`` -- right ascension in sexegesimal or decimal degress.
- ``dec`` -- declination in sexegesimal or decimal degress.
**Return:**
- ``cartesians`` -- tuple of (x, y, z) coordinates
.. todo::
- replace calculate_cartesians in all code
**Usage:**
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
x, y, z = converter.ra_dec_to_cartesian(
ra="23 45 21.23232",
dec="+01:58:5.45341"
)
print x, y, z
# OUTPUT: 0.9973699780687104, -0.06382462462791459, 0.034344492110465606
"""
self.log.info('starting the ``ra_dec_to_cartesian`` method')
ra = self.ra_sexegesimal_to_decimal(
ra=ra
)
dec = self.dec_sexegesimal_to_decimal(
dec=dec
)
ra = math.radians(ra)
dec = math.radians(dec)
cos_dec = math.cos(dec)
cx = math.cos(ra) * cos_dec
cy = math.sin(ra) * cos_dec
cz = math.sin(dec)
cartesians = (cx, cy, cz)
self.log.info('completed the ``ra_dec_to_cartesian`` method')
return cartesians
|
thespacedoctor/astrocalc
|
astrocalc/coords/unit_conversion.py
|
unit_conversion.ra_decimal_to_sexegesimal
|
python
|
def ra_decimal_to_sexegesimal(
self,
ra,
delimiter=":"):
self.log.info('starting the ``ra_decimal_to_sexegesimal`` method')
# CONVERT RA TO FLOAT
try:
self.log.debug("attempting to convert RA to float")
ra = float(ra)
except Exception, e:
self.log.error(
"could not convert RA to float - failed with this error: %s " % (str(e),))
return -1
# COMPLAIN IF RA NOT BETWEEN -360 - 360
if ra > 0. and ra < 360.:
pass
elif ra < 0 and ra > -360.:
ra = 360. + ra
else:
self.log.error(
"RA must be between 0 - 360 degrees")
return -1
# PRECISION TEST
# 1s ARCSEC = .000018519 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 5
decimalLen = len(repr(ra).split(".")[-1])
precision = decimalLen - 5
# CALCULATION FROM DECIMAL DEGREES
import math
ra_hh = int(ra / 15)
ra_mm = int((ra / 15 - ra_hh) * 60)
ra_ss = int(((ra / 15 - ra_hh) * 60 - ra_mm) * 60)
ra_ff = ((ra / 15 - ra_hh) * 60 - ra_mm) * 60 - ra_ss
# SET PRECISION
ra_ff = repr(ra_ff)[2:]
ra_ff = ra_ff[:precision]
if len(ra_ff):
ra_ff = "." + ra_ff
if precision < 0:
ra_ff = ""
sexegesimal = '%02d' % ra_hh + delimiter + '%02d' % ra_mm + \
delimiter + '%02d' % ra_ss + ra_ff
self.log.info('completed the ``ra_decimal_to_sexegesimal`` method')
return sexegesimal
|
*Convert a right-ascension between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``ra`` -- RA in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace ra_to_sex from dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
ra = converter.ra_decimal_to_sexegesimal(
ra="-23.454676456",
delimiter=":"
)
print ra
# OUT: 22:26:10.87
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/coords/unit_conversion.py#L280-L361
| null |
class unit_conversion():
"""
*The worker class for the unit_conversion module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary (prob not required)
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- add ra_sexegesimal_to_decimal
.. code-block:: python
usage code
.. todo::
- @review: when complete, clean unit_conversion class
- @review: when complete add logging
- @review: when complete, decide whether to abstract class to another module
"""
# Initialisation
# 1. @flagged: what are the unique attrributes for each object? Add them
# to __init__
def __init__(
self,
log,
settings=False
):
self.log = log
log.debug("instansiating a new 'unit_conversion' object")
self.settings = settings
# xt-self-arg-tmpx
# 2. @flagged: what are the default attrributes each object could have? Add them to variable attribute set here
# Variable Data Atrributes
# 3. @flagged: what variable attrributes need overriden in any baseclass(es) used
# Override Variable Data Atrributes
# Initial Actions
return None
# 4. @flagged: what actions does each object have to be able to perform? Add them here
# Method Attributes
def get(self):
"""
*get the unit_conversion object*
**Return:**
- ``unit_conversion``
.. todo::
- @review: when complete, clean get method
- @review: when complete add logging
"""
self.log.info('starting the ``get`` method')
unit_conversion = None
self.log.info('completed the ``get`` method')
return unit_conversion
def dec_sexegesimal_to_decimal(
self,
dec):
"""
*Convert a declination from sexegesimal format to decimal degrees.*
Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).
The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly:
- ``+1:58:05.45341``
- ``01:5:05``
- ``+1 58 05.45341``
- ``-23h53m05s``
**Key Arguments:**
- ``dec`` - DEC in sexegesimal format.
**Return:**
- ``decDeg`` -- declination converted to decimal degrees
**Usage:**
.. todo::
- replace dryxPython declination_sexegesimal_to_decimal with this version in all my code
- replace coords_sex_to_dec in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_sexegesimal_to_decimal(
dec="-23:45:21.23232"
)
print dec
# OUTPUT: -23.7558978667
"""
self.log.info(
'starting the ``dec_sexegesimal_to_decimal`` method')
import re
# TEST TO SEE IF DECIMAL DEGREES PASSED
try:
dec = float(dec)
if dec > -90. and dec < 90.:
self.log.info(
'declination seems to already be in decimal degrees, returning original value' % locals())
return float(dec)
except:
pass
# REMOVE SURROUNDING WHITESPACE
dec = str(dec).strip()
# LOOK FOR A MINUS SIGN. NOTE THAT -00 IS THE SAME AS 00.
regex = re.compile(
'^([\+\-]?(\d|[0-8]\d))\D+([0-5]\d)\D+([0-6]?\d(\.\d+)?)$')
decMatch = regex.match(dec)
if decMatch:
degrees = decMatch.group(1)
minutes = decMatch.group(3)
seconds = decMatch.group(4)
if degrees[0] == '-':
sgn = -1
else:
sgn = 1
degrees = abs(float(degrees))
minutes = float(minutes)
seconds = float(seconds)
# PRECISION TEST
# 1s = .000277778 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 4
decimalLen = len(repr(seconds).split(".")[-1])
precision = decimalLen + 4
decDeg = (degrees + (minutes / 60.0)
+ (seconds / 3600.0)) * sgn
decDeg = "%0.*f" % (precision, decDeg)
else:
raise IOError(
"could not convert dec to decimal degrees, could not parse sexegesimal input. Original value was `%(dec)s`" % locals())
decDeg = float(decDeg)
self.log.debug('decDeg: %(decDeg)s' % locals())
self.log.info(
'completed the ``dec_sexegesimal_to_decimal`` method')
return float(decDeg)
def ra_sexegesimal_to_decimal(
self,
ra
):
"""
*Convert a right-ascension from sexegesimal format to decimal degrees.*
Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).
The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly
- ``23:45:21.23232``
- ``23h45m21.23232s``
- ``23 45 21.23232``
- ``2 04 21.23232``
- ``04:45 21``
**Key Arguments:**
- ``ra`` -- ra in sexegesimal units
**Return:**
- ``decimalDegrees``
**Usage:**
.. code-block:: python
- replace dryxPython ra_sexegesimal_to_decimal with this version in all my code
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
ra = converter.ra_sexegesimal_to_decimal(
ra="04:45 21"
)
print ra
# OUTPUT: 71.3375
"""
import re
# TEST TO SEE IF DECIMAL DEGREES PASSED
try:
ra = float(ra)
if ra >= 0. and ra <= 360.:
self.log.info(
'RA seems to already be in decimal degrees, returning original value' % locals())
return float(ra)
except:
pass
# REMOVE SURROUNDING WHITESPACE
ra = str(ra).strip()
regex = re.compile(
'^(\+?(\d|[0-1]\d|2[0-3]))\D+([0-5]\d)\D+([0-6]?\d(\.\d*?)?)(s)?\s*?$')
raMatch = regex.match(ra)
if raMatch:
degrees = raMatch.group(1)
minutes = raMatch.group(3)
seconds = raMatch.group(4)
degrees = abs(float(degrees)) * 15.0
minutes = float(minutes) * 15.0
seconds = float(seconds) * 15.0
# PRECISION TEST
# 1s ARCSEC = .000018519 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 5
decimalLen = len(repr(seconds).split(".")[-1])
precision = decimalLen + 5
decimalDegrees = (degrees + (minutes / 60.0)
+ (seconds / 3600.0))
decimalDegrees = "%0.*f" % (precision, decimalDegrees)
else:
raise IOError(
"could not convert ra to decimal degrees, could not parse sexegesimal input. Original value was `%(ra)s`" % locals())
raDeg = decimalDegrees
self.log.debug('raDeg: %(decimalDegrees)s' % locals())
self.log.info(
'completed the ``ra_sexegesimal_to_decimal`` method')
return float(raDeg)
def dec_decimal_to_sexegesimal(
self,
dec,
delimiter=":"):
"""
*Convert a declination between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``dec`` -- DEC in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace dec_to_sex in dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_decimal_to_sexegesimal(
dec="-3.454676456",
delimiter=":"
)
print dec
# OUT: -03:27:16.8
"""
self.log.info('starting the ``dec_decimal_to_sexegesimal`` method')
import math
# CONVERT DEC TO FLOAT
try:
self.log.debug("attempting to convert RA to float")
dec = float(dec)
except Exception, e:
self.log.error(
"could not convert RA to float - failed with this error: %s " % (str(e),))
return -1
# COMPLAIN IF DEC NOT BETWEEN -90 - 90
if dec > -90. and dec < 90.:
pass
else:
self.log.error(
"DEC must be between -90 - 90 degrees")
return -1
if (dec >= 0):
hemisphere = '+'
else:
hemisphere = '-'
dec *= -1
# PRECISION TEST
# 1s = .000277778 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 4
decimalLen = len(repr(dec).split(".")[-1])
precision = decimalLen - 4
dec_deg = int(dec)
dec_mm = int((dec - dec_deg) * 60)
dec_ss = int(((dec - dec_deg) * 60 - dec_mm) * 60)
dec_f = (((dec - dec_deg) * 60 - dec_mm) * 60) - dec_ss
# SET PRECISION
dec_f = repr(dec_f)[2:]
dec_f = dec_f[:precision]
if len(dec_f):
dec_f = "." + dec_f
if precision < 0:
dec_f = ""
sexegesimal = hemisphere + '%02d' % dec_deg + delimiter + \
'%02d' % dec_mm + delimiter + '%02d' % dec_ss + dec_f
self.log.info('completed the ``dec_decimal_to_sexegesimal`` method')
return sexegesimal
# use the tab-trigger below for new method
def ra_dec_to_cartesian(
self,
ra,
dec):
"""*Convert an RA, DEC coordinate set to x, y, z cartesian coordinates*
**Key Arguments:**
- ``ra`` -- right ascension in sexegesimal or decimal degress.
- ``dec`` -- declination in sexegesimal or decimal degress.
**Return:**
- ``cartesians`` -- tuple of (x, y, z) coordinates
.. todo::
- replace calculate_cartesians in all code
**Usage:**
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
x, y, z = converter.ra_dec_to_cartesian(
ra="23 45 21.23232",
dec="+01:58:5.45341"
)
print x, y, z
# OUTPUT: 0.9973699780687104, -0.06382462462791459, 0.034344492110465606
"""
self.log.info('starting the ``ra_dec_to_cartesian`` method')
ra = self.ra_sexegesimal_to_decimal(
ra=ra
)
dec = self.dec_sexegesimal_to_decimal(
dec=dec
)
ra = math.radians(ra)
dec = math.radians(dec)
cos_dec = math.cos(dec)
cx = math.cos(ra) * cos_dec
cy = math.sin(ra) * cos_dec
cz = math.sin(dec)
cartesians = (cx, cy, cz)
self.log.info('completed the ``ra_dec_to_cartesian`` method')
return cartesians
|
thespacedoctor/astrocalc
|
astrocalc/coords/unit_conversion.py
|
unit_conversion.dec_decimal_to_sexegesimal
|
python
|
def dec_decimal_to_sexegesimal(
self,
dec,
delimiter=":"):
self.log.info('starting the ``dec_decimal_to_sexegesimal`` method')
import math
# CONVERT DEC TO FLOAT
try:
self.log.debug("attempting to convert RA to float")
dec = float(dec)
except Exception, e:
self.log.error(
"could not convert RA to float - failed with this error: %s " % (str(e),))
return -1
# COMPLAIN IF DEC NOT BETWEEN -90 - 90
if dec > -90. and dec < 90.:
pass
else:
self.log.error(
"DEC must be between -90 - 90 degrees")
return -1
if (dec >= 0):
hemisphere = '+'
else:
hemisphere = '-'
dec *= -1
# PRECISION TEST
# 1s = .000277778 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 4
decimalLen = len(repr(dec).split(".")[-1])
precision = decimalLen - 4
dec_deg = int(dec)
dec_mm = int((dec - dec_deg) * 60)
dec_ss = int(((dec - dec_deg) * 60 - dec_mm) * 60)
dec_f = (((dec - dec_deg) * 60 - dec_mm) * 60) - dec_ss
# SET PRECISION
dec_f = repr(dec_f)[2:]
dec_f = dec_f[:precision]
if len(dec_f):
dec_f = "." + dec_f
if precision < 0:
dec_f = ""
sexegesimal = hemisphere + '%02d' % dec_deg + delimiter + \
'%02d' % dec_mm + delimiter + '%02d' % dec_ss + dec_f
self.log.info('completed the ``dec_decimal_to_sexegesimal`` method')
return sexegesimal
|
*Convert a declination between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``dec`` -- DEC in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace dec_to_sex in dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_decimal_to_sexegesimal(
dec="-3.454676456",
delimiter=":"
)
print dec
# OUT: -03:27:16.8
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/coords/unit_conversion.py#L363-L448
| null |
class unit_conversion():
"""
*The worker class for the unit_conversion module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary (prob not required)
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- add ra_sexegesimal_to_decimal
.. code-block:: python
usage code
.. todo::
- @review: when complete, clean unit_conversion class
- @review: when complete add logging
- @review: when complete, decide whether to abstract class to another module
"""
# Initialisation
# 1. @flagged: what are the unique attrributes for each object? Add them
# to __init__
def __init__(
self,
log,
settings=False
):
self.log = log
log.debug("instansiating a new 'unit_conversion' object")
self.settings = settings
# xt-self-arg-tmpx
# 2. @flagged: what are the default attrributes each object could have? Add them to variable attribute set here
# Variable Data Atrributes
# 3. @flagged: what variable attrributes need overriden in any baseclass(es) used
# Override Variable Data Atrributes
# Initial Actions
return None
# 4. @flagged: what actions does each object have to be able to perform? Add them here
# Method Attributes
def get(self):
"""
*get the unit_conversion object*
**Return:**
- ``unit_conversion``
.. todo::
- @review: when complete, clean get method
- @review: when complete add logging
"""
self.log.info('starting the ``get`` method')
unit_conversion = None
self.log.info('completed the ``get`` method')
return unit_conversion
def dec_sexegesimal_to_decimal(
self,
dec):
"""
*Convert a declination from sexegesimal format to decimal degrees.*
Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).
The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly:
- ``+1:58:05.45341``
- ``01:5:05``
- ``+1 58 05.45341``
- ``-23h53m05s``
**Key Arguments:**
- ``dec`` - DEC in sexegesimal format.
**Return:**
- ``decDeg`` -- declination converted to decimal degrees
**Usage:**
.. todo::
- replace dryxPython declination_sexegesimal_to_decimal with this version in all my code
- replace coords_sex_to_dec in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_sexegesimal_to_decimal(
dec="-23:45:21.23232"
)
print dec
# OUTPUT: -23.7558978667
"""
self.log.info(
'starting the ``dec_sexegesimal_to_decimal`` method')
import re
# TEST TO SEE IF DECIMAL DEGREES PASSED
try:
dec = float(dec)
if dec > -90. and dec < 90.:
self.log.info(
'declination seems to already be in decimal degrees, returning original value' % locals())
return float(dec)
except:
pass
# REMOVE SURROUNDING WHITESPACE
dec = str(dec).strip()
# LOOK FOR A MINUS SIGN. NOTE THAT -00 IS THE SAME AS 00.
regex = re.compile(
'^([\+\-]?(\d|[0-8]\d))\D+([0-5]\d)\D+([0-6]?\d(\.\d+)?)$')
decMatch = regex.match(dec)
if decMatch:
degrees = decMatch.group(1)
minutes = decMatch.group(3)
seconds = decMatch.group(4)
if degrees[0] == '-':
sgn = -1
else:
sgn = 1
degrees = abs(float(degrees))
minutes = float(minutes)
seconds = float(seconds)
# PRECISION TEST
# 1s = .000277778 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 4
decimalLen = len(repr(seconds).split(".")[-1])
precision = decimalLen + 4
decDeg = (degrees + (minutes / 60.0)
+ (seconds / 3600.0)) * sgn
decDeg = "%0.*f" % (precision, decDeg)
else:
raise IOError(
"could not convert dec to decimal degrees, could not parse sexegesimal input. Original value was `%(dec)s`" % locals())
decDeg = float(decDeg)
self.log.debug('decDeg: %(decDeg)s' % locals())
self.log.info(
'completed the ``dec_sexegesimal_to_decimal`` method')
return float(decDeg)
def ra_sexegesimal_to_decimal(
self,
ra
):
"""
*Convert a right-ascension from sexegesimal format to decimal degrees.*
Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).
The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly
- ``23:45:21.23232``
- ``23h45m21.23232s``
- ``23 45 21.23232``
- ``2 04 21.23232``
- ``04:45 21``
**Key Arguments:**
- ``ra`` -- ra in sexegesimal units
**Return:**
- ``decimalDegrees``
**Usage:**
.. code-block:: python
- replace dryxPython ra_sexegesimal_to_decimal with this version in all my code
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
ra = converter.ra_sexegesimal_to_decimal(
ra="04:45 21"
)
print ra
# OUTPUT: 71.3375
"""
import re
# TEST TO SEE IF DECIMAL DEGREES PASSED
try:
ra = float(ra)
if ra >= 0. and ra <= 360.:
self.log.info(
'RA seems to already be in decimal degrees, returning original value' % locals())
return float(ra)
except:
pass
# REMOVE SURROUNDING WHITESPACE
ra = str(ra).strip()
regex = re.compile(
'^(\+?(\d|[0-1]\d|2[0-3]))\D+([0-5]\d)\D+([0-6]?\d(\.\d*?)?)(s)?\s*?$')
raMatch = regex.match(ra)
if raMatch:
degrees = raMatch.group(1)
minutes = raMatch.group(3)
seconds = raMatch.group(4)
degrees = abs(float(degrees)) * 15.0
minutes = float(minutes) * 15.0
seconds = float(seconds) * 15.0
# PRECISION TEST
# 1s ARCSEC = .000018519 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 5
decimalLen = len(repr(seconds).split(".")[-1])
precision = decimalLen + 5
decimalDegrees = (degrees + (minutes / 60.0)
+ (seconds / 3600.0))
decimalDegrees = "%0.*f" % (precision, decimalDegrees)
else:
raise IOError(
"could not convert ra to decimal degrees, could not parse sexegesimal input. Original value was `%(ra)s`" % locals())
raDeg = decimalDegrees
self.log.debug('raDeg: %(decimalDegrees)s' % locals())
self.log.info(
'completed the ``ra_sexegesimal_to_decimal`` method')
return float(raDeg)
def ra_decimal_to_sexegesimal(
self,
ra,
delimiter=":"):
"""
*Convert a right-ascension between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``ra`` -- RA in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace ra_to_sex from dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
ra = converter.ra_decimal_to_sexegesimal(
ra="-23.454676456",
delimiter=":"
)
print ra
# OUT: 22:26:10.87
"""
self.log.info('starting the ``ra_decimal_to_sexegesimal`` method')
# CONVERT RA TO FLOAT
try:
self.log.debug("attempting to convert RA to float")
ra = float(ra)
except Exception, e:
self.log.error(
"could not convert RA to float - failed with this error: %s " % (str(e),))
return -1
# COMPLAIN IF RA NOT BETWEEN -360 - 360
if ra > 0. and ra < 360.:
pass
elif ra < 0 and ra > -360.:
ra = 360. + ra
else:
self.log.error(
"RA must be between 0 - 360 degrees")
return -1
# PRECISION TEST
# 1s ARCSEC = .000018519 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 5
decimalLen = len(repr(ra).split(".")[-1])
precision = decimalLen - 5
# CALCULATION FROM DECIMAL DEGREES
import math
ra_hh = int(ra / 15)
ra_mm = int((ra / 15 - ra_hh) * 60)
ra_ss = int(((ra / 15 - ra_hh) * 60 - ra_mm) * 60)
ra_ff = ((ra / 15 - ra_hh) * 60 - ra_mm) * 60 - ra_ss
# SET PRECISION
ra_ff = repr(ra_ff)[2:]
ra_ff = ra_ff[:precision]
if len(ra_ff):
ra_ff = "." + ra_ff
if precision < 0:
ra_ff = ""
sexegesimal = '%02d' % ra_hh + delimiter + '%02d' % ra_mm + \
delimiter + '%02d' % ra_ss + ra_ff
self.log.info('completed the ``ra_decimal_to_sexegesimal`` method')
return sexegesimal
# use the tab-trigger below for new method
def ra_dec_to_cartesian(
self,
ra,
dec):
"""*Convert an RA, DEC coordinate set to x, y, z cartesian coordinates*
**Key Arguments:**
- ``ra`` -- right ascension in sexegesimal or decimal degress.
- ``dec`` -- declination in sexegesimal or decimal degress.
**Return:**
- ``cartesians`` -- tuple of (x, y, z) coordinates
.. todo::
- replace calculate_cartesians in all code
**Usage:**
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
x, y, z = converter.ra_dec_to_cartesian(
ra="23 45 21.23232",
dec="+01:58:5.45341"
)
print x, y, z
# OUTPUT: 0.9973699780687104, -0.06382462462791459, 0.034344492110465606
"""
self.log.info('starting the ``ra_dec_to_cartesian`` method')
ra = self.ra_sexegesimal_to_decimal(
ra=ra
)
dec = self.dec_sexegesimal_to_decimal(
dec=dec
)
ra = math.radians(ra)
dec = math.radians(dec)
cos_dec = math.cos(dec)
cx = math.cos(ra) * cos_dec
cy = math.sin(ra) * cos_dec
cz = math.sin(dec)
cartesians = (cx, cy, cz)
self.log.info('completed the ``ra_dec_to_cartesian`` method')
return cartesians
|
thespacedoctor/astrocalc
|
astrocalc/coords/unit_conversion.py
|
unit_conversion.ra_dec_to_cartesian
|
python
|
def ra_dec_to_cartesian(
self,
ra,
dec):
self.log.info('starting the ``ra_dec_to_cartesian`` method')
ra = self.ra_sexegesimal_to_decimal(
ra=ra
)
dec = self.dec_sexegesimal_to_decimal(
dec=dec
)
ra = math.radians(ra)
dec = math.radians(dec)
cos_dec = math.cos(dec)
cx = math.cos(ra) * cos_dec
cy = math.sin(ra) * cos_dec
cz = math.sin(dec)
cartesians = (cx, cy, cz)
self.log.info('completed the ``ra_dec_to_cartesian`` method')
return cartesians
|
*Convert an RA, DEC coordinate set to x, y, z cartesian coordinates*
**Key Arguments:**
- ``ra`` -- right ascension in sexegesimal or decimal degress.
- ``dec`` -- declination in sexegesimal or decimal degress.
**Return:**
- ``cartesians`` -- tuple of (x, y, z) coordinates
.. todo::
- replace calculate_cartesians in all code
**Usage:**
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
x, y, z = converter.ra_dec_to_cartesian(
ra="23 45 21.23232",
dec="+01:58:5.45341"
)
print x, y, z
# OUTPUT: 0.9973699780687104, -0.06382462462791459, 0.034344492110465606
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/coords/unit_conversion.py#L451-L503
|
[
"def dec_sexegesimal_to_decimal(\n self,\n dec):\n \"\"\"\n *Convert a declination from sexegesimal format to decimal degrees.*\n\n Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).\n\n The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly:\n\n - ``+1:58:05.45341``\n - ``01:5:05``\n - ``+1 58 05.45341``\n - ``-23h53m05s``\n\n **Key Arguments:**\n - ``dec`` - DEC in sexegesimal format.\n\n **Return:**\n - ``decDeg`` -- declination converted to decimal degrees\n\n **Usage:**\n\n .. todo::\n\n - replace dryxPython declination_sexegesimal_to_decimal with this version in all my code\n - replace coords_sex_to_dec in all code\n\n .. code-block:: python\n\n from astrocalc.coords import unit_conversion\n converter = unit_conversion(\n log=log\n )\n dec = converter.dec_sexegesimal_to_decimal(\n dec=\"-23:45:21.23232\"\n )\n print dec\n\n # OUTPUT: -23.7558978667\n \"\"\"\n self.log.info(\n 'starting the ``dec_sexegesimal_to_decimal`` method')\n\n import re\n\n # TEST TO SEE IF DECIMAL DEGREES PASSED\n try:\n dec = float(dec)\n if dec > -90. and dec < 90.:\n self.log.info(\n 'declination seems to already be in decimal degrees, returning original value' % locals())\n return float(dec)\n except:\n pass\n\n # REMOVE SURROUNDING WHITESPACE\n dec = str(dec).strip()\n\n # LOOK FOR A MINUS SIGN. 
NOTE THAT -00 IS THE SAME AS 00.\n regex = re.compile(\n '^([\\+\\-]?(\\d|[0-8]\\d))\\D+([0-5]\\d)\\D+([0-6]?\\d(\\.\\d+)?)$')\n decMatch = regex.match(dec)\n\n if decMatch:\n degrees = decMatch.group(1)\n minutes = decMatch.group(3)\n seconds = decMatch.group(4)\n\n if degrees[0] == '-':\n sgn = -1\n else:\n sgn = 1\n\n degrees = abs(float(degrees))\n minutes = float(minutes)\n seconds = float(seconds)\n\n # PRECISION TEST\n # 1s = .000277778 DEGREE\n # THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 4\n decimalLen = len(repr(seconds).split(\".\")[-1])\n precision = decimalLen + 4\n\n decDeg = (degrees + (minutes / 60.0)\n + (seconds / 3600.0)) * sgn\n\n decDeg = \"%0.*f\" % (precision, decDeg)\n\n else:\n raise IOError(\n \"could not convert dec to decimal degrees, could not parse sexegesimal input. Original value was `%(dec)s`\" % locals())\n\n decDeg = float(decDeg)\n self.log.debug('decDeg: %(decDeg)s' % locals())\n self.log.info(\n 'completed the ``dec_sexegesimal_to_decimal`` method')\n\n return float(decDeg)\n",
"def ra_sexegesimal_to_decimal(\n self,\n ra\n):\n \"\"\"\n *Convert a right-ascension from sexegesimal format to decimal degrees.*\n\n Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).\n\n The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly\n\n - ``23:45:21.23232``\n - ``23h45m21.23232s``\n - ``23 45 21.23232``\n - ``2 04 21.23232``\n - ``04:45 21``\n\n **Key Arguments:**\n - ``ra`` -- ra in sexegesimal units\n\n **Return:**\n - ``decimalDegrees``\n\n **Usage:**\n\n .. code-block:: python\n\n - replace dryxPython ra_sexegesimal_to_decimal with this version in all my code\n\n from astrocalc.coords import unit_conversion\n converter = unit_conversion(\n log=log\n )\n ra = converter.ra_sexegesimal_to_decimal(\n ra=\"04:45 21\"\n )\n print ra\n\n # OUTPUT: 71.3375\n \"\"\"\n import re\n\n # TEST TO SEE IF DECIMAL DEGREES PASSED\n try:\n ra = float(ra)\n if ra >= 0. 
and ra <= 360.:\n self.log.info(\n 'RA seems to already be in decimal degrees, returning original value' % locals())\n return float(ra)\n except:\n pass\n\n # REMOVE SURROUNDING WHITESPACE\n ra = str(ra).strip()\n\n regex = re.compile(\n '^(\\+?(\\d|[0-1]\\d|2[0-3]))\\D+([0-5]\\d)\\D+([0-6]?\\d(\\.\\d*?)?)(s)?\\s*?$')\n raMatch = regex.match(ra)\n\n if raMatch:\n degrees = raMatch.group(1)\n minutes = raMatch.group(3)\n seconds = raMatch.group(4)\n\n degrees = abs(float(degrees)) * 15.0\n minutes = float(minutes) * 15.0\n seconds = float(seconds) * 15.0\n\n # PRECISION TEST\n # 1s ARCSEC = .000018519 DEGREE\n # THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 5\n decimalLen = len(repr(seconds).split(\".\")[-1])\n precision = decimalLen + 5\n\n decimalDegrees = (degrees + (minutes / 60.0)\n + (seconds / 3600.0))\n\n decimalDegrees = \"%0.*f\" % (precision, decimalDegrees)\n\n else:\n raise IOError(\n \"could not convert ra to decimal degrees, could not parse sexegesimal input. Original value was `%(ra)s`\" % locals())\n\n raDeg = decimalDegrees\n self.log.debug('raDeg: %(decimalDegrees)s' % locals())\n self.log.info(\n 'completed the ``ra_sexegesimal_to_decimal`` method')\n\n return float(raDeg)\n"
] |
class unit_conversion():
"""
*The worker class for the unit_conversion module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary (prob not required)
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- add ra_sexegesimal_to_decimal
.. code-block:: python
usage code
.. todo::
- @review: when complete, clean unit_conversion class
- @review: when complete add logging
- @review: when complete, decide whether to abstract class to another module
"""
# Initialisation
# 1. @flagged: what are the unique attrributes for each object? Add them
# to __init__
def __init__(
self,
log,
settings=False
):
self.log = log
log.debug("instansiating a new 'unit_conversion' object")
self.settings = settings
# xt-self-arg-tmpx
# 2. @flagged: what are the default attrributes each object could have? Add them to variable attribute set here
# Variable Data Atrributes
# 3. @flagged: what variable attrributes need overriden in any baseclass(es) used
# Override Variable Data Atrributes
# Initial Actions
return None
# 4. @flagged: what actions does each object have to be able to perform? Add them here
# Method Attributes
def get(self):
"""
*get the unit_conversion object*
**Return:**
- ``unit_conversion``
.. todo::
- @review: when complete, clean get method
- @review: when complete add logging
"""
self.log.info('starting the ``get`` method')
unit_conversion = None
self.log.info('completed the ``get`` method')
return unit_conversion
def dec_sexegesimal_to_decimal(
self,
dec):
"""
*Convert a declination from sexegesimal format to decimal degrees.*
Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).
The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly:
- ``+1:58:05.45341``
- ``01:5:05``
- ``+1 58 05.45341``
- ``-23h53m05s``
**Key Arguments:**
- ``dec`` - DEC in sexegesimal format.
**Return:**
- ``decDeg`` -- declination converted to decimal degrees
**Usage:**
.. todo::
- replace dryxPython declination_sexegesimal_to_decimal with this version in all my code
- replace coords_sex_to_dec in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_sexegesimal_to_decimal(
dec="-23:45:21.23232"
)
print dec
# OUTPUT: -23.7558978667
"""
self.log.info(
'starting the ``dec_sexegesimal_to_decimal`` method')
import re
# TEST TO SEE IF DECIMAL DEGREES PASSED
try:
dec = float(dec)
if dec > -90. and dec < 90.:
self.log.info(
'declination seems to already be in decimal degrees, returning original value' % locals())
return float(dec)
except:
pass
# REMOVE SURROUNDING WHITESPACE
dec = str(dec).strip()
# LOOK FOR A MINUS SIGN. NOTE THAT -00 IS THE SAME AS 00.
regex = re.compile(
'^([\+\-]?(\d|[0-8]\d))\D+([0-5]\d)\D+([0-6]?\d(\.\d+)?)$')
decMatch = regex.match(dec)
if decMatch:
degrees = decMatch.group(1)
minutes = decMatch.group(3)
seconds = decMatch.group(4)
if degrees[0] == '-':
sgn = -1
else:
sgn = 1
degrees = abs(float(degrees))
minutes = float(minutes)
seconds = float(seconds)
# PRECISION TEST
# 1s = .000277778 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 4
decimalLen = len(repr(seconds).split(".")[-1])
precision = decimalLen + 4
decDeg = (degrees + (minutes / 60.0)
+ (seconds / 3600.0)) * sgn
decDeg = "%0.*f" % (precision, decDeg)
else:
raise IOError(
"could not convert dec to decimal degrees, could not parse sexegesimal input. Original value was `%(dec)s`" % locals())
decDeg = float(decDeg)
self.log.debug('decDeg: %(decDeg)s' % locals())
self.log.info(
'completed the ``dec_sexegesimal_to_decimal`` method')
return float(decDeg)
def ra_sexegesimal_to_decimal(
self,
ra
):
"""
*Convert a right-ascension from sexegesimal format to decimal degrees.*
Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).
The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly
- ``23:45:21.23232``
- ``23h45m21.23232s``
- ``23 45 21.23232``
- ``2 04 21.23232``
- ``04:45 21``
**Key Arguments:**
- ``ra`` -- ra in sexegesimal units
**Return:**
- ``decimalDegrees``
**Usage:**
.. code-block:: python
- replace dryxPython ra_sexegesimal_to_decimal with this version in all my code
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
ra = converter.ra_sexegesimal_to_decimal(
ra="04:45 21"
)
print ra
# OUTPUT: 71.3375
"""
import re
# TEST TO SEE IF DECIMAL DEGREES PASSED
try:
ra = float(ra)
if ra >= 0. and ra <= 360.:
self.log.info(
'RA seems to already be in decimal degrees, returning original value' % locals())
return float(ra)
except:
pass
# REMOVE SURROUNDING WHITESPACE
ra = str(ra).strip()
regex = re.compile(
'^(\+?(\d|[0-1]\d|2[0-3]))\D+([0-5]\d)\D+([0-6]?\d(\.\d*?)?)(s)?\s*?$')
raMatch = regex.match(ra)
if raMatch:
degrees = raMatch.group(1)
minutes = raMatch.group(3)
seconds = raMatch.group(4)
degrees = abs(float(degrees)) * 15.0
minutes = float(minutes) * 15.0
seconds = float(seconds) * 15.0
# PRECISION TEST
# 1s ARCSEC = .000018519 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 5
decimalLen = len(repr(seconds).split(".")[-1])
precision = decimalLen + 5
decimalDegrees = (degrees + (minutes / 60.0)
+ (seconds / 3600.0))
decimalDegrees = "%0.*f" % (precision, decimalDegrees)
else:
raise IOError(
"could not convert ra to decimal degrees, could not parse sexegesimal input. Original value was `%(ra)s`" % locals())
raDeg = decimalDegrees
self.log.debug('raDeg: %(decimalDegrees)s' % locals())
self.log.info(
'completed the ``ra_sexegesimal_to_decimal`` method')
return float(raDeg)
def ra_decimal_to_sexegesimal(
self,
ra,
delimiter=":"):
"""
*Convert a right-ascension between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``ra`` -- RA in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace ra_to_sex from dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
ra = converter.ra_decimal_to_sexegesimal(
ra="-23.454676456",
delimiter=":"
)
print ra
# OUT: 22:26:10.87
"""
self.log.info('starting the ``ra_decimal_to_sexegesimal`` method')
# CONVERT RA TO FLOAT
try:
self.log.debug("attempting to convert RA to float")
ra = float(ra)
except Exception, e:
self.log.error(
"could not convert RA to float - failed with this error: %s " % (str(e),))
return -1
# COMPLAIN IF RA NOT BETWEEN -360 - 360
if ra > 0. and ra < 360.:
pass
elif ra < 0 and ra > -360.:
ra = 360. + ra
else:
self.log.error(
"RA must be between 0 - 360 degrees")
return -1
# PRECISION TEST
# 1s ARCSEC = .000018519 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 5
decimalLen = len(repr(ra).split(".")[-1])
precision = decimalLen - 5
# CALCULATION FROM DECIMAL DEGREES
import math
ra_hh = int(ra / 15)
ra_mm = int((ra / 15 - ra_hh) * 60)
ra_ss = int(((ra / 15 - ra_hh) * 60 - ra_mm) * 60)
ra_ff = ((ra / 15 - ra_hh) * 60 - ra_mm) * 60 - ra_ss
# SET PRECISION
ra_ff = repr(ra_ff)[2:]
ra_ff = ra_ff[:precision]
if len(ra_ff):
ra_ff = "." + ra_ff
if precision < 0:
ra_ff = ""
sexegesimal = '%02d' % ra_hh + delimiter + '%02d' % ra_mm + \
delimiter + '%02d' % ra_ss + ra_ff
self.log.info('completed the ``ra_decimal_to_sexegesimal`` method')
return sexegesimal
def dec_decimal_to_sexegesimal(
self,
dec,
delimiter=":"):
"""
*Convert a declination between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``dec`` -- DEC in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace dec_to_sex in dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_decimal_to_sexegesimal(
dec="-3.454676456",
delimiter=":"
)
print dec
# OUT: -03:27:16.8
"""
self.log.info('starting the ``dec_decimal_to_sexegesimal`` method')
import math
# CONVERT DEC TO FLOAT
try:
self.log.debug("attempting to convert RA to float")
dec = float(dec)
except Exception, e:
self.log.error(
"could not convert RA to float - failed with this error: %s " % (str(e),))
return -1
# COMPLAIN IF DEC NOT BETWEEN -90 - 90
if dec > -90. and dec < 90.:
pass
else:
self.log.error(
"DEC must be between -90 - 90 degrees")
return -1
if (dec >= 0):
hemisphere = '+'
else:
hemisphere = '-'
dec *= -1
# PRECISION TEST
# 1s = .000277778 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 4
decimalLen = len(repr(dec).split(".")[-1])
precision = decimalLen - 4
dec_deg = int(dec)
dec_mm = int((dec - dec_deg) * 60)
dec_ss = int(((dec - dec_deg) * 60 - dec_mm) * 60)
dec_f = (((dec - dec_deg) * 60 - dec_mm) * 60) - dec_ss
# SET PRECISION
dec_f = repr(dec_f)[2:]
dec_f = dec_f[:precision]
if len(dec_f):
dec_f = "." + dec_f
if precision < 0:
dec_f = ""
sexegesimal = hemisphere + '%02d' % dec_deg + delimiter + \
'%02d' % dec_mm + delimiter + '%02d' % dec_ss + dec_f
self.log.info('completed the ``dec_decimal_to_sexegesimal`` method')
return sexegesimal
# use the tab-trigger below for new method
|
thespacedoctor/astrocalc
|
astrocalc/distances/converter.py
|
converter.distance_to_redshift
|
python
|
def distance_to_redshift(
self,
mpc):
self.log.info('starting the ``distance_to_redshift`` method')
lowerLimit = 0.
upperLimit = 30.
redshift = upperLimit - lowerLimit
distGuess = float(self.redshift_to_distance(redshift)['dl_mpc'])
distDiff = mpc - distGuess
while math.fabs(distDiff) > 0.0001:
if distGuess < mpc:
lowerLimit = redshift
redshift = lowerLimit + (upperLimit - lowerLimit) / 2.
distGuess = float(
self.redshift_to_distance(redshift)['dl_mpc'])
elif distGuess > mpc:
upperLimit = redshift
redshift = lowerLimit + (upperLimit - lowerLimit) / 2.
distGuess = float(
self.redshift_to_distance(redshift)['dl_mpc'])
distDiff = mpc - distGuess
redshift = float("%5.4f" % (redshift,))
self.log.info('completed the ``distance_to_redshift`` method')
return redshift
|
*Convert a distance from MPC to redshift*
The code works by iteratively converting a redshift to a distance, correcting itself and honing in on the true answer (within a certain precision)
**Key Arguments:**
- ``mpc`` -- distance in MPC (assumes a luminousity distance).
**Return:**
- ``redshift``
.. todo::
- replace convert_mpc_to_redshift in all code
**Usage:**
.. code-block:: python
from astrocalc.distances import converter
c = converter(log=log)
z = c.distance_to_redshift(
mpc=500
)
print z
# OUTPUT: 0.108
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/distances/converter.py#L55-L111
|
[
"def redshift_to_distance(\n self,\n z,\n WM=0.3,\n WV=0.7,\n H0=70.0):\n \"\"\"*convert redshift to various distance measurements*\n\n **Key Arguments:**\n - ``z`` -- redshift measurement.\n - ``WM`` -- Omega_matter. Default *0.3*\n - ``WV`` -- Omega_vacuum. Default *0.7*\n - ``H0`` -- Hubble constant. (km s-1 Mpc-1) Default *70.0*\n\n **Return:**\n - ``results`` -- result dictionary including\n - ``dcmr_mpc`` -- co-moving radius distance\n - ``da_mpc`` -- angular distance\n - ``da_scale`` -- angular distance scale\n - ``dl_mpc`` -- luminosity distance (usually use this one)\n - ``dmod`` -- distance modulus (determined from luminosity distance)\n\n .. todo::\n\n - replace convert_redshift_to_distance in all other code\n\n **Usage:**\n\n .. code-block:: python\n\n from astrocalc.distances import converter\n c = converter(log=log)\n dists = c.redshift_to_distance(\n z=0.343\n )\n\n print \"Distance Modulus: \" + str(dists[\"dmod\"]) + \" mag\"\n print \"Luminousity Distance: \" + str(dists[\"dl_mpc\"]) + \" Mpc\"\n print \"Angular Size Scale: \" + str(dists[\"da_scale\"]) + \" kpc/arcsec\"\n print \"Angular Size Distance: \" + str(dists[\"da_mpc\"]) + \" Mpc\"\n print \"Comoving Radial Distance: \" + str(dists[\"dcmr_mpc\"]) + \" Mpc\"\n\n # OUTPUT :\n # Distance Modulus: 41.27 mag\n # Luminousity Distance: 1795.16 Mpc\n # Angular Size Scale: 4.85 kpc/arcsec\n # Angular Size Distance: 999.76 Mpc\n # Comoving Radial Distance: 1339.68 Mpc\n\n from astrocalc.distances import converter\n c = converter(log=log)\n dists = c.redshift_to_distance(\n z=0.343,\n WM=0.286,\n WV=0.714,\n H0=69.6\n )\n\n print \"Distance Modulus: \" + str(dists[\"dmod\"]) + \" mag\"\n print \"Luminousity Distance: \" + str(dists[\"dl_mpc\"]) + \" Mpc\"\n print \"Angular Size Scale: \" + str(dists[\"da_scale\"]) + \" kpc/arcsec\"\n print \"Angular Size Distance: \" + str(dists[\"da_mpc\"]) + \" Mpc\"\n print \"Comoving Radial Distance: \" + str(dists[\"dcmr_mpc\"]) + \" Mpc\"\n\n # OUTPUT :\n # 
Distance Modulus: 41.29 mag\n # Luminousity Distance: 1811.71 Mpc\n # Angular Size Scale: 4.89 kpc/arcsec\n # Angular Size Distance: 1008.97 Mpc\n # Comoving Radial Distance: 1352.03 Mpc\n\n \"\"\"\n self.log.info('starting the ``redshift_to_distance`` method')\n\n # VARIABLE\n h = H0 / 100.0\n WR = 4.165E-5 / (h * h) # Omega_radiation\n WK = 1.0 - WM - WV - WR # Omega_curvature = 1 - Omega(Total)\n c = 299792.458 # speed of light (km/s)\n\n # Arbitrarily set the values of these variables to zero just so we can\n # define them.\n DCMR = 0.0 # comoving radial distance in units of c/H0\n DCMR_Mpc = 0.0 # comoving radial distance in units of Mpc\n DA = 0.0 # angular size distance in units of c/H0\n DA_Mpc = 0.0 # angular size distance in units of Mpc\n # scale at angular size distance in units of Kpc / arcsec\n DA_scale = 0.0\n DL = 0.0 # luminosity distance in units of c/H0\n DL_Mpc = 0.0 # luminosity distance in units of Mpc\n # Distance modulus determined from luminosity distance\n DMOD = 0.0\n a = 0.0 # 1/(1+z), the scale factor of the Universe\n\n az = 1.0 / (1.0 + z) # 1/(1+z), for the given redshift\n\n # Compute the integral over a=1/(1+z) from az to 1 in n steps\n n = 1000\n for i in range(n):\n a = az + (1.0 - az) * (i + 0.5) / n\n adot = math.sqrt(WK + (WM / a) + (WR / (math.pow(a, 2)))\n + (WV * math.pow(a, 2)))\n DCMR = DCMR + 1.0 / (a * adot)\n\n # comoving radial distance in units of c/H0\n DCMR = (1.0 - az) * DCMR / n\n # comoving radial distance in units of Mpc\n DCMR_Mpc = (c / H0) * DCMR\n\n # Tangental comoving radial distance\n x = math.sqrt(abs(WK)) * DCMR\n if x > 0.1:\n if WK > 0.0:\n ratio = 0.5 * (math.exp(x) - math.exp(-x)) / x\n else:\n ratio = math.sin(x) / x\n else:\n y = math.pow(x, 2)\n if WK < 0.0:\n y = -y\n ratio = 1 + y / 6.0 + math.pow(y, 2) / 120.0\n\n DA = az * ratio * DCMR # angular size distance in units of c/H0\n DA_Mpc = (c / H0) * DA # angular size distance in units of Mpc\n # scale at angular size distance in units of Kpc / 
arcsec\n DA_scale = DA_Mpc / 206.264806\n DL = DA / math.pow(az, 2) # luminosity distance in units of c/H0\n DL_Mpc = (c / H0) * DL # luminosity distance in units of Mpc\n # Distance modulus determined from luminosity distance\n DMOD = 5 * math.log10(DL_Mpc * 1e6) - 5\n\n # FIXING PRECISIONS\n # PRECISION TEST\n precision = len(repr(z).split(\".\")[-1])\n DCMR_Mpc = \"%0.*f\" % (precision, DCMR_Mpc)\n DA_Mpc = \"%0.*f\" % (precision, DA_Mpc)\n DA_scale = \"%0.*f\" % (precision, DA_scale)\n DL_Mpc = \"%0.*f\" % (precision, DL_Mpc)\n DMOD = \"%0.*f\" % (precision, DMOD)\n z = \"%0.*f\" % (precision, z)\n\n results = \\\n {\n \"dcmr_mpc\": float(DCMR_Mpc),\n \"da_mpc\": float(DA_Mpc),\n \"da_scale\": float(DA_scale),\n \"dl_mpc\": float(DL_Mpc),\n \"dmod\": float(DMOD),\n \"z\": float(z)\n }\n\n self.log.info('completed the ``redshift_to_distance`` method')\n return results\n"
] |
class converter():
"""
*A converter to switch distance between various units of measurement*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
**Usage:**
To instantiate a ``converter`` object:
.. code-block:: python
from astrocalc.distances import converter
c = converter(log=log)
"""
# Initialisation
def __init__(
self,
log,
settings=False,
):
self.log = log
log.debug("instansiating a new 'converter' object")
self.settings = settings
# xt-self-arg-tmpx
# Initial Actions
return None
def redshift_to_distance(
self,
z,
WM=0.3,
WV=0.7,
H0=70.0):
"""*convert redshift to various distance measurements*
**Key Arguments:**
- ``z`` -- redshift measurement.
- ``WM`` -- Omega_matter. Default *0.3*
- ``WV`` -- Omega_vacuum. Default *0.7*
- ``H0`` -- Hubble constant. (km s-1 Mpc-1) Default *70.0*
**Return:**
- ``results`` -- result dictionary including
- ``dcmr_mpc`` -- co-moving radius distance
- ``da_mpc`` -- angular distance
- ``da_scale`` -- angular distance scale
- ``dl_mpc`` -- luminosity distance (usually use this one)
- ``dmod`` -- distance modulus (determined from luminosity distance)
.. todo::
- replace convert_redshift_to_distance in all other code
**Usage:**
.. code-block:: python
from astrocalc.distances import converter
c = converter(log=log)
dists = c.redshift_to_distance(
z=0.343
)
print "Distance Modulus: " + str(dists["dmod"]) + " mag"
print "Luminousity Distance: " + str(dists["dl_mpc"]) + " Mpc"
print "Angular Size Scale: " + str(dists["da_scale"]) + " kpc/arcsec"
print "Angular Size Distance: " + str(dists["da_mpc"]) + " Mpc"
print "Comoving Radial Distance: " + str(dists["dcmr_mpc"]) + " Mpc"
# OUTPUT :
# Distance Modulus: 41.27 mag
# Luminousity Distance: 1795.16 Mpc
# Angular Size Scale: 4.85 kpc/arcsec
# Angular Size Distance: 999.76 Mpc
# Comoving Radial Distance: 1339.68 Mpc
from astrocalc.distances import converter
c = converter(log=log)
dists = c.redshift_to_distance(
z=0.343,
WM=0.286,
WV=0.714,
H0=69.6
)
print "Distance Modulus: " + str(dists["dmod"]) + " mag"
print "Luminousity Distance: " + str(dists["dl_mpc"]) + " Mpc"
print "Angular Size Scale: " + str(dists["da_scale"]) + " kpc/arcsec"
print "Angular Size Distance: " + str(dists["da_mpc"]) + " Mpc"
print "Comoving Radial Distance: " + str(dists["dcmr_mpc"]) + " Mpc"
# OUTPUT :
# Distance Modulus: 41.29 mag
# Luminousity Distance: 1811.71 Mpc
# Angular Size Scale: 4.89 kpc/arcsec
# Angular Size Distance: 1008.97 Mpc
# Comoving Radial Distance: 1352.03 Mpc
"""
self.log.info('starting the ``redshift_to_distance`` method')
# VARIABLE
h = H0 / 100.0
WR = 4.165E-5 / (h * h) # Omega_radiation
WK = 1.0 - WM - WV - WR # Omega_curvature = 1 - Omega(Total)
c = 299792.458 # speed of light (km/s)
# Arbitrarily set the values of these variables to zero just so we can
# define them.
DCMR = 0.0 # comoving radial distance in units of c/H0
DCMR_Mpc = 0.0 # comoving radial distance in units of Mpc
DA = 0.0 # angular size distance in units of c/H0
DA_Mpc = 0.0 # angular size distance in units of Mpc
# scale at angular size distance in units of Kpc / arcsec
DA_scale = 0.0
DL = 0.0 # luminosity distance in units of c/H0
DL_Mpc = 0.0 # luminosity distance in units of Mpc
# Distance modulus determined from luminosity distance
DMOD = 0.0
a = 0.0 # 1/(1+z), the scale factor of the Universe
az = 1.0 / (1.0 + z) # 1/(1+z), for the given redshift
# Compute the integral over a=1/(1+z) from az to 1 in n steps
n = 1000
for i in range(n):
a = az + (1.0 - az) * (i + 0.5) / n
adot = math.sqrt(WK + (WM / a) + (WR / (math.pow(a, 2)))
+ (WV * math.pow(a, 2)))
DCMR = DCMR + 1.0 / (a * adot)
# comoving radial distance in units of c/H0
DCMR = (1.0 - az) * DCMR / n
# comoving radial distance in units of Mpc
DCMR_Mpc = (c / H0) * DCMR
# Tangental comoving radial distance
x = math.sqrt(abs(WK)) * DCMR
if x > 0.1:
if WK > 0.0:
ratio = 0.5 * (math.exp(x) - math.exp(-x)) / x
else:
ratio = math.sin(x) / x
else:
y = math.pow(x, 2)
if WK < 0.0:
y = -y
ratio = 1 + y / 6.0 + math.pow(y, 2) / 120.0
DA = az * ratio * DCMR # angular size distance in units of c/H0
DA_Mpc = (c / H0) * DA # angular size distance in units of Mpc
# scale at angular size distance in units of Kpc / arcsec
DA_scale = DA_Mpc / 206.264806
DL = DA / math.pow(az, 2) # luminosity distance in units of c/H0
DL_Mpc = (c / H0) * DL # luminosity distance in units of Mpc
# Distance modulus determined from luminosity distance
DMOD = 5 * math.log10(DL_Mpc * 1e6) - 5
# FIXING PRECISIONS
# PRECISION TEST
precision = len(repr(z).split(".")[-1])
DCMR_Mpc = "%0.*f" % (precision, DCMR_Mpc)
DA_Mpc = "%0.*f" % (precision, DA_Mpc)
DA_scale = "%0.*f" % (precision, DA_scale)
DL_Mpc = "%0.*f" % (precision, DL_Mpc)
DMOD = "%0.*f" % (precision, DMOD)
z = "%0.*f" % (precision, z)
results = \
{
"dcmr_mpc": float(DCMR_Mpc),
"da_mpc": float(DA_Mpc),
"da_scale": float(DA_scale),
"dl_mpc": float(DL_Mpc),
"dmod": float(DMOD),
"z": float(z)
}
self.log.info('completed the ``redshift_to_distance`` method')
return results
|
thespacedoctor/astrocalc
|
astrocalc/distances/converter.py
|
converter.redshift_to_distance
|
python
|
def redshift_to_distance(
self,
z,
WM=0.3,
WV=0.7,
H0=70.0):
self.log.info('starting the ``redshift_to_distance`` method')
# VARIABLE
h = H0 / 100.0
WR = 4.165E-5 / (h * h) # Omega_radiation
WK = 1.0 - WM - WV - WR # Omega_curvature = 1 - Omega(Total)
c = 299792.458 # speed of light (km/s)
# Arbitrarily set the values of these variables to zero just so we can
# define them.
DCMR = 0.0 # comoving radial distance in units of c/H0
DCMR_Mpc = 0.0 # comoving radial distance in units of Mpc
DA = 0.0 # angular size distance in units of c/H0
DA_Mpc = 0.0 # angular size distance in units of Mpc
# scale at angular size distance in units of Kpc / arcsec
DA_scale = 0.0
DL = 0.0 # luminosity distance in units of c/H0
DL_Mpc = 0.0 # luminosity distance in units of Mpc
# Distance modulus determined from luminosity distance
DMOD = 0.0
a = 0.0 # 1/(1+z), the scale factor of the Universe
az = 1.0 / (1.0 + z) # 1/(1+z), for the given redshift
# Compute the integral over a=1/(1+z) from az to 1 in n steps
n = 1000
for i in range(n):
a = az + (1.0 - az) * (i + 0.5) / n
adot = math.sqrt(WK + (WM / a) + (WR / (math.pow(a, 2)))
+ (WV * math.pow(a, 2)))
DCMR = DCMR + 1.0 / (a * adot)
# comoving radial distance in units of c/H0
DCMR = (1.0 - az) * DCMR / n
# comoving radial distance in units of Mpc
DCMR_Mpc = (c / H0) * DCMR
# Tangental comoving radial distance
x = math.sqrt(abs(WK)) * DCMR
if x > 0.1:
if WK > 0.0:
ratio = 0.5 * (math.exp(x) - math.exp(-x)) / x
else:
ratio = math.sin(x) / x
else:
y = math.pow(x, 2)
if WK < 0.0:
y = -y
ratio = 1 + y / 6.0 + math.pow(y, 2) / 120.0
DA = az * ratio * DCMR # angular size distance in units of c/H0
DA_Mpc = (c / H0) * DA # angular size distance in units of Mpc
# scale at angular size distance in units of Kpc / arcsec
DA_scale = DA_Mpc / 206.264806
DL = DA / math.pow(az, 2) # luminosity distance in units of c/H0
DL_Mpc = (c / H0) * DL # luminosity distance in units of Mpc
# Distance modulus determined from luminosity distance
DMOD = 5 * math.log10(DL_Mpc * 1e6) - 5
# FIXING PRECISIONS
# PRECISION TEST
precision = len(repr(z).split(".")[-1])
DCMR_Mpc = "%0.*f" % (precision, DCMR_Mpc)
DA_Mpc = "%0.*f" % (precision, DA_Mpc)
DA_scale = "%0.*f" % (precision, DA_scale)
DL_Mpc = "%0.*f" % (precision, DL_Mpc)
DMOD = "%0.*f" % (precision, DMOD)
z = "%0.*f" % (precision, z)
results = \
{
"dcmr_mpc": float(DCMR_Mpc),
"da_mpc": float(DA_Mpc),
"da_scale": float(DA_scale),
"dl_mpc": float(DL_Mpc),
"dmod": float(DMOD),
"z": float(z)
}
self.log.info('completed the ``redshift_to_distance`` method')
return results
|
*convert redshift to various distance measurements*
**Key Arguments:**
- ``z`` -- redshift measurement.
- ``WM`` -- Omega_matter. Default *0.3*
- ``WV`` -- Omega_vacuum. Default *0.7*
- ``H0`` -- Hubble constant. (km s-1 Mpc-1) Default *70.0*
**Return:**
- ``results`` -- result dictionary including
- ``dcmr_mpc`` -- co-moving radius distance
- ``da_mpc`` -- angular distance
- ``da_scale`` -- angular distance scale
- ``dl_mpc`` -- luminosity distance (usually use this one)
- ``dmod`` -- distance modulus (determined from luminosity distance)
.. todo::
- replace convert_redshift_to_distance in all other code
**Usage:**
.. code-block:: python
from astrocalc.distances import converter
c = converter(log=log)
dists = c.redshift_to_distance(
z=0.343
)
print "Distance Modulus: " + str(dists["dmod"]) + " mag"
print "Luminousity Distance: " + str(dists["dl_mpc"]) + " Mpc"
print "Angular Size Scale: " + str(dists["da_scale"]) + " kpc/arcsec"
print "Angular Size Distance: " + str(dists["da_mpc"]) + " Mpc"
print "Comoving Radial Distance: " + str(dists["dcmr_mpc"]) + " Mpc"
# OUTPUT :
# Distance Modulus: 41.27 mag
# Luminousity Distance: 1795.16 Mpc
# Angular Size Scale: 4.85 kpc/arcsec
# Angular Size Distance: 999.76 Mpc
# Comoving Radial Distance: 1339.68 Mpc
from astrocalc.distances import converter
c = converter(log=log)
dists = c.redshift_to_distance(
z=0.343,
WM=0.286,
WV=0.714,
H0=69.6
)
print "Distance Modulus: " + str(dists["dmod"]) + " mag"
print "Luminousity Distance: " + str(dists["dl_mpc"]) + " Mpc"
print "Angular Size Scale: " + str(dists["da_scale"]) + " kpc/arcsec"
print "Angular Size Distance: " + str(dists["da_mpc"]) + " Mpc"
print "Comoving Radial Distance: " + str(dists["dcmr_mpc"]) + " Mpc"
# OUTPUT :
# Distance Modulus: 41.29 mag
# Luminousity Distance: 1811.71 Mpc
# Angular Size Scale: 4.89 kpc/arcsec
# Angular Size Distance: 1008.97 Mpc
# Comoving Radial Distance: 1352.03 Mpc
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/distances/converter.py#L113-L265
| null |
class converter():
"""
*A converter to switch distance between various units of measurement*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
**Usage:**
To instantiate a ``converter`` object:
.. code-block:: python
from astrocalc.distances import converter
c = converter(log=log)
"""
# Initialisation
def __init__(
self,
log,
settings=False,
):
self.log = log
log.debug("instansiating a new 'converter' object")
self.settings = settings
# xt-self-arg-tmpx
# Initial Actions
return None
def distance_to_redshift(
self,
mpc):
"""*Convert a distance from MPC to redshift*
The code works by iteratively converting a redshift to a distance, correcting itself and honing in on the true answer (within a certain precision)
**Key Arguments:**
- ``mpc`` -- distance in MPC (assumes a luminousity distance).
**Return:**
- ``redshift``
.. todo::
- replace convert_mpc_to_redshift in all code
**Usage:**
.. code-block:: python
from astrocalc.distances import converter
c = converter(log=log)
z = c.distance_to_redshift(
mpc=500
)
print z
# OUTPUT: 0.108
"""
self.log.info('starting the ``distance_to_redshift`` method')
lowerLimit = 0.
upperLimit = 30.
redshift = upperLimit - lowerLimit
distGuess = float(self.redshift_to_distance(redshift)['dl_mpc'])
distDiff = mpc - distGuess
while math.fabs(distDiff) > 0.0001:
if distGuess < mpc:
lowerLimit = redshift
redshift = lowerLimit + (upperLimit - lowerLimit) / 2.
distGuess = float(
self.redshift_to_distance(redshift)['dl_mpc'])
elif distGuess > mpc:
upperLimit = redshift
redshift = lowerLimit + (upperLimit - lowerLimit) / 2.
distGuess = float(
self.redshift_to_distance(redshift)['dl_mpc'])
distDiff = mpc - distGuess
redshift = float("%5.4f" % (redshift,))
self.log.info('completed the ``distance_to_redshift`` method')
return redshift
|
thespacedoctor/astrocalc
|
astrocalc/coords/separations.py
|
separations.get
|
python
|
def get(
self):
self.log.info('starting the ``get_angular_separation`` method')
from astrocalc.coords import unit_conversion
# CONSTANTS
pi = (4 * math.atan(1.0))
DEG_TO_RAD_FACTOR = pi / 180.0
RAD_TO_DEG_FACTOR = 180.0 / pi
converter = unit_conversion(
log=self.log
)
dec1 = converter.dec_sexegesimal_to_decimal(
dec=self.dec1
)
dec2 = converter.dec_sexegesimal_to_decimal(
dec=self.dec2
)
ra1 = converter.ra_sexegesimal_to_decimal(
ra=self.ra1
)
ra2 = converter.ra_sexegesimal_to_decimal(
ra=self.ra2
)
# PRECISION TEST
precision = 100
vals = [dec1, dec2, ra1, ra2]
for v in vals:
thisLen = len(repr(v * 3600.).split(".")[-1])
if thisLen < precision:
precision = thisLen
angularSeparation = None
aa = (90.0 - dec1) * DEG_TO_RAD_FACTOR
bb = (90.0 - dec2) * DEG_TO_RAD_FACTOR
cc = (ra1 - ra2) * DEG_TO_RAD_FACTOR
one = math.cos(aa) * math.cos(bb)
two = math.sin(aa) * math.sin(bb) * math.cos(cc)
# Because acos() returns NaN outside the ranges of -1 to +1
# we need to check this. Double precision decimal places
# can give values like 1.0000000000002 which will throw an
# exception.
three = one + two
if (three > 1.0):
three = 1.0
if (three < -1.0):
three = -1.0
# BE CAREFUL WITH PRECISION PROPAGATION
thisVal = math.acos(three)
angularSeparation = float(thisVal) * RAD_TO_DEG_FACTOR * 3600.0
# Now work out N-S, E-W separations (object 1 relative to 2)
north = -(dec1 - dec2) * 3600.0
east = -(ra1 - ra2) * \
math.cos((dec1 + dec2) * DEG_TO_RAD_FACTOR / 2.) * 3600.0
angularSeparation = "%0.*f" % (precision, angularSeparation)
north = "%0.*f" % (precision, north)
east = "%0.*f" % (precision, east)
self.log.info('completed the ``get_angular_separation`` method')
return angularSeparation, north, east
|
*Calulate the angular separation between two locations on the sky*
Input precision should be respected.
**Key Arguments:**
**Return:**
- ``angularSeparation`` -- total angular separation between coordinates (arcsec)
- ``north`` -- north-south separation between coordinates (arcsec)
- ``east`` -- east-west separation between coordinates (arcsec)
See main class usage for details.
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/coords/separations.py#L110-L191
|
[
"def dec_sexegesimal_to_decimal(\n self,\n dec):\n \"\"\"\n *Convert a declination from sexegesimal format to decimal degrees.*\n\n Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).\n\n The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly:\n\n - ``+1:58:05.45341``\n - ``01:5:05``\n - ``+1 58 05.45341``\n - ``-23h53m05s``\n\n **Key Arguments:**\n - ``dec`` - DEC in sexegesimal format.\n\n **Return:**\n - ``decDeg`` -- declination converted to decimal degrees\n\n **Usage:**\n\n .. todo::\n\n - replace dryxPython declination_sexegesimal_to_decimal with this version in all my code\n - replace coords_sex_to_dec in all code\n\n .. code-block:: python\n\n from astrocalc.coords import unit_conversion\n converter = unit_conversion(\n log=log\n )\n dec = converter.dec_sexegesimal_to_decimal(\n dec=\"-23:45:21.23232\"\n )\n print dec\n\n # OUTPUT: -23.7558978667\n \"\"\"\n self.log.info(\n 'starting the ``dec_sexegesimal_to_decimal`` method')\n\n import re\n\n # TEST TO SEE IF DECIMAL DEGREES PASSED\n try:\n dec = float(dec)\n if dec > -90. and dec < 90.:\n self.log.info(\n 'declination seems to already be in decimal degrees, returning original value' % locals())\n return float(dec)\n except:\n pass\n\n # REMOVE SURROUNDING WHITESPACE\n dec = str(dec).strip()\n\n # LOOK FOR A MINUS SIGN. 
NOTE THAT -00 IS THE SAME AS 00.\n regex = re.compile(\n '^([\\+\\-]?(\\d|[0-8]\\d))\\D+([0-5]\\d)\\D+([0-6]?\\d(\\.\\d+)?)$')\n decMatch = regex.match(dec)\n\n if decMatch:\n degrees = decMatch.group(1)\n minutes = decMatch.group(3)\n seconds = decMatch.group(4)\n\n if degrees[0] == '-':\n sgn = -1\n else:\n sgn = 1\n\n degrees = abs(float(degrees))\n minutes = float(minutes)\n seconds = float(seconds)\n\n # PRECISION TEST\n # 1s = .000277778 DEGREE\n # THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 4\n decimalLen = len(repr(seconds).split(\".\")[-1])\n precision = decimalLen + 4\n\n decDeg = (degrees + (minutes / 60.0)\n + (seconds / 3600.0)) * sgn\n\n decDeg = \"%0.*f\" % (precision, decDeg)\n\n else:\n raise IOError(\n \"could not convert dec to decimal degrees, could not parse sexegesimal input. Original value was `%(dec)s`\" % locals())\n\n decDeg = float(decDeg)\n self.log.debug('decDeg: %(decDeg)s' % locals())\n self.log.info(\n 'completed the ``dec_sexegesimal_to_decimal`` method')\n\n return float(decDeg)\n",
"def ra_sexegesimal_to_decimal(\n self,\n ra\n):\n \"\"\"\n *Convert a right-ascension from sexegesimal format to decimal degrees.*\n\n Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).\n\n The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly\n\n - ``23:45:21.23232``\n - ``23h45m21.23232s``\n - ``23 45 21.23232``\n - ``2 04 21.23232``\n - ``04:45 21``\n\n **Key Arguments:**\n - ``ra`` -- ra in sexegesimal units\n\n **Return:**\n - ``decimalDegrees``\n\n **Usage:**\n\n .. code-block:: python\n\n - replace dryxPython ra_sexegesimal_to_decimal with this version in all my code\n\n from astrocalc.coords import unit_conversion\n converter = unit_conversion(\n log=log\n )\n ra = converter.ra_sexegesimal_to_decimal(\n ra=\"04:45 21\"\n )\n print ra\n\n # OUTPUT: 71.3375\n \"\"\"\n import re\n\n # TEST TO SEE IF DECIMAL DEGREES PASSED\n try:\n ra = float(ra)\n if ra >= 0. 
and ra <= 360.:\n self.log.info(\n 'RA seems to already be in decimal degrees, returning original value' % locals())\n return float(ra)\n except:\n pass\n\n # REMOVE SURROUNDING WHITESPACE\n ra = str(ra).strip()\n\n regex = re.compile(\n '^(\\+?(\\d|[0-1]\\d|2[0-3]))\\D+([0-5]\\d)\\D+([0-6]?\\d(\\.\\d*?)?)(s)?\\s*?$')\n raMatch = regex.match(ra)\n\n if raMatch:\n degrees = raMatch.group(1)\n minutes = raMatch.group(3)\n seconds = raMatch.group(4)\n\n degrees = abs(float(degrees)) * 15.0\n minutes = float(minutes) * 15.0\n seconds = float(seconds) * 15.0\n\n # PRECISION TEST\n # 1s ARCSEC = .000018519 DEGREE\n # THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 5\n decimalLen = len(repr(seconds).split(\".\")[-1])\n precision = decimalLen + 5\n\n decimalDegrees = (degrees + (minutes / 60.0)\n + (seconds / 3600.0))\n\n decimalDegrees = \"%0.*f\" % (precision, decimalDegrees)\n\n else:\n raise IOError(\n \"could not convert ra to decimal degrees, could not parse sexegesimal input. Original value was `%(ra)s`\" % locals())\n\n raDeg = decimalDegrees\n self.log.debug('raDeg: %(decimalDegrees)s' % locals())\n self.log.info(\n 'completed the ``ra_sexegesimal_to_decimal`` method')\n\n return float(raDeg)\n"
] |
class separations():
"""
*The worker class for the separations module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``ra1`` -- the right-ascension of the first location. Decimal degrees or sexegesimal.
- ``dec1`` -- the declination of the first location. Decimal degrees or sexegesimal.
- ``ra2`` -- the right-ascension of the second location. Decimal degrees or sexegesimal.
- ``dec2`` -- the declination of the second location. Decimal degrees or sexegesimal.
**Usage:**
.. todo::
- replace get_angular_separation throughout all code using dryxPython
- replace getAngularSeparationthroughout all code using dryxPython
You can input sexegesimal coordinates,
.. code-block:: python
from astrocalc.coords import separations
calculator = separations(
log=log,
ra1="23:32:23.2324",
dec1="-13:32:45.43553",
ra2="23:32:34.642",
dec2="-12:12:34.9334",
)
angularSeparation, north, east = calculator.get()
print angularSeparation, north, east
# OUT: '4813.39431', '4810.50214', '166.83941'
or decimal degrees,
.. code-block:: python
from astrocalc.coords import separations
calculator = separations(
log=log,
ra1=2.3342343,
dec1=89.23244233,
ra2=45.343545345,
dec2=87.3435435
)
angularSeparation, north, east = calculator.get()
print angularSeparation, north, east
# OUT: '7774.4375', '-6800.0358', '4625.7620'
or even a mix of both
.. code-block:: python
from astrocalc.coords import separations
calculator = separations(
log=log,
ra1=352.5342343,
dec1=89.23,
ra2="23:32:34.642",
dec2="89:12:34.9334"
)
angularSeparation, north, east = calculator.get()
print angularSeparation, north, east
# OUT: '78.9', '-73.1', '29.9')
"""
# Initialisation
def __init__(
self,
log,
ra1,
dec1,
ra2,
dec2,
settings=False
):
self.log = log
log.debug("instansiating a new 'separations' object")
self.settings = settings
self.ra1 = ra1
self.dec1 = dec1
self.ra2 = ra2
self.dec2 = dec2
# xt-self-arg-tmpx
return None
|
thespacedoctor/astrocalc
|
astrocalc/cl_utils.py
|
main
|
python
|
def main(arguments=None):
    """*The main function used when ``cl_utils.py`` is run as a single script
    from the command line, or when installed as a cl command*

    **Key Arguments:**
        - ``arguments`` -- pre-parsed docopt argument dictionary; when *None*
          the arguments are parsed from ``sys.argv`` via the module docstring
    """
    from astrocalc.coords import unit_conversion
    # setup the command-line util settings
    su = tools(
        arguments=arguments,
        docString=__doc__,
        logLevel="CRITICAL",
        options_first=True,
        projectName="astrocalc"
    )
    arguments, settings, log, dbConn = su.setup()

    # tab completion for raw_input
    readline.set_completer_delims(' \t\n;')
    readline.parse_and_bind("tab: complete")
    readline.set_completer(tab_complete)

    # unpack remaining cl arguments using `exec` to setup the variable names
    # automatically
    # NOTE: docopt keys like `--mpc` become `mpcFlag`; `<ra>` becomes `ra`.
    # The names are injected directly into this function's local scope, which
    # is why later code can reference e.g. `coordflip` / `sep` / `timeflip`.
    for arg, val in arguments.iteritems():
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = '%s'" % (val,))
        else:
            exec(varname + " = %s" % (val,))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (varname, val,))

    ## START LOGGING ##
    startTime = times.get_now_sql_datetime()
    log.info(
        '--- STARTING TO RUN THE cl_utils.py AT %s' %
        (startTime,))

    # set options interactively if user requests
    if "interactiveFlag" in locals() and interactiveFlag:

        # load previous settings
        moduleDirectory = os.path.dirname(__file__) + "/resources"
        pathToPickleFile = "%(moduleDirectory)s/previousSettings.p" % locals()
        try:
            with open(pathToPickleFile):
                pass
            previousSettingsExist = True
        except:
            previousSettingsExist = False
        previousSettings = {}
        if previousSettingsExist:
            previousSettings = pickle.load(open(pathToPickleFile, "rb"))

        # x-raw-input
        # x-boolean-raw-input
        # x-raw-input-with-default-value-from-previous-settings

        # save the most recently used requests
        pickleMeObjects = []
        pickleMe = {}
        theseLocals = locals()
        for k in pickleMeObjects:
            pickleMe[k] = theseLocals[k]
        pickle.dump(pickleMe, open(pathToPickleFile, "wb"))

    # CALL FUNCTIONS/OBJECTS

    # `coordflip` -- flip a single coordinate pair between decimal degrees
    # and sexegesimal (or convert to cartesian when the -c flag is given)
    if coordflip:

        if cartesianFlag:
            converter = unit_conversion(
                log=log
            )
            # NOTE(review): the coordinates below are hard-coded -- it looks
            # like the user-supplied <ra>/<dec> values should be used here;
            # confirm before relying on the cartesian output
            x, y, z = converter.ra_dec_to_cartesian(
                ra="23 45 21.23232",
                dec="+01:58:5.45341"
            )
            print x, y, z
            return

        # DETERMINE INPUT FORMAT -- if both values parse as floats they are
        # assumed to be decimal degrees, otherwise sexegesimal
        try:
            ra = float(ra)
            dec = float(dec)
            degree = True
        except Exception, e:
            degree = False

        if degree is True:
            # decimal degrees in -> sexegesimal out
            converter = unit_conversion(
                log=log
            )
            try:
                ra = converter.ra_decimal_to_sexegesimal(
                    ra=ra,
                    delimiter=":"
                )
                dec = converter.dec_decimal_to_sexegesimal(
                    dec=dec,
                    delimiter=":"
                )
            except Exception, e:
                print e
                sys.exit(0)
            print ra, dec
        else:
            # sexegesimal in -> decimal degrees out
            converter = unit_conversion(
                log=log
            )
            try:
                ra = converter.ra_sexegesimal_to_decimal(
                    ra=ra
                )
                dec = converter.dec_sexegesimal_to_decimal(
                    dec=dec
                )
            except Exception, e:
                print e
                sys.exit(0)
            print ra, dec

    # `sep` -- angular separation between two sky locations (arcsec)
    if sep:
        from astrocalc.coords import separations
        calculator = separations(
            log=log,
            ra1=ra1,
            dec1=dec1,
            ra2=ra2,
            dec2=dec2,
        )
        angularSeparation, north, east = calculator.get()
        print """%(angularSeparation)s arcsec (%(north)s N, %(east)s E)""" % locals()

    # `timeflip` -- convert between UT datetime and MJD
    if timeflip:
        # heuristic: a value that parses as a float AND does not start with
        # 0/1/2 (i.e. cannot be a year like 19xx/20xx) is treated as an MJD
        try:
            inputMjd = float(datetime)
            if datetime[0] not in ["0", "1", "2"]:
                inputMjd = True
            else:
                inputMjd = False
        except:
            inputMjd = False
        from astrocalc.times import conversions
        converter = conversions(
            log=log
        )
        if inputMjd == False:
            try:
                mjd = converter.ut_datetime_to_mjd(utDatetime=datetime)
                print mjd
            except Exception, e:
                print e
        else:
            try:
                utDate = converter.mjd_to_ut_datetime(mjd=datetime)
                print utDate
            except Exception, e:
                print e

    # `trans` -- translate a location north/east across the sky (arcsec)
    if trans:
        # TRANSLATE COORDINATES ACROSS SKY
        from astrocalc.coords import translate
        newRa, newDec = translate(
            log=log,
            ra=ra,
            dec=dec,
            northArcsec=float(north),
            eastArcsec=float(east)
        ).get()
        # report the new location in both decimal and sexegesimal formats
        from astrocalc.coords import unit_conversion
        converter = unit_conversion(
            log=log
        )
        ra = converter.ra_decimal_to_sexegesimal(
            ra=newRa,
            delimiter=":"
        )
        dec = converter.dec_decimal_to_sexegesimal(
            dec=newDec,
            delimiter=":"
        )
        print "%(newRa)s, %(newDec)s (%(ra)s, %(dec)s)" % locals()

    # `now` -- report the current time as an MJD
    if now:
        from astrocalc.times import now
        mjd = now(
            log=log
        ).get_mjd()
        print mjd

    # `dist -z` -- redshift to various distance measurements
    if dist and redshiftFlag:
        from astrocalc.distances import converter
        c = converter(log=log)
        # default cosmology: H0=70 km/s/Mpc, Omega_matter=0.3, Omega_vacuum=0.7
        if not hcFlag:
            hcFlag = 70.
        if not wmFlag:
            wmFlag = 0.3
        if not wvFlag:
            wvFlag = 0.7
        dists = c.redshift_to_distance(
            z=float(distVal),
            WM=float(wmFlag),
            WV=float(wvFlag),
            H0=float(hcFlag)
        )
        print "Distance Modulus: " + str(dists["dmod"]) + " mag"
        print "Luminousity Distance: " + str(dists["dl_mpc"]) + " Mpc"
        print "Angular Size Scale: " + str(dists["da_scale"]) + " kpc/arcsec"
        print "Angular Size Distance: " + str(dists["da_mpc"]) + " Mpc"
        print "Comoving Radial Distance: " + str(dists["dcmr_mpc"]) + " Mpc"

    # `dist -m` -- luminosity distance (Mpc) to redshift
    if dist and mpcFlag:
        from astrocalc.distances import converter
        c = converter(log=log)
        z = c.distance_to_redshift(
            mpc=float(distVal)
        )
        print "z = %(z)s" % locals()

    # tidy up any database connection handed in via --dbConn
    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()

    ## FINISH LOGGING ##
    endTime = times.get_now_sql_datetime()
    runningTime = times.calculate_time_difference(startTime, endTime)
    log.info('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' %
             (endTime, runningTime, ))

    return
|
*The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/cl_utils.py#L58-L291
|
[
"def ut_datetime_to_mjd(\n self,\n utDatetime):\n \"\"\"*ut datetime to mjd*\n\n If the date given has no time associated with it (e.g. ``20160426``), then the datetime assumed is ``20160426 00:00:00.0``.\n\n Precision should be respected. \n\n **Key Arguments:**\n - ``utDatetime`` -- UT datetime. Can accept various formats e.g. ``201604261444``, ``20160426``, ``20160426144444.5452``, ``2016-04-26 14:44:44.234``, ``20160426 14h44m44.432s`` \n\n **Return:**\n - ``mjd`` -- the MJD\n\n .. todo ::\n\n - replace getMJDFromSqlDate in all code\n\n **Usage:**\n\n .. code-block:: python \n\n from astrocalc.times import conversions\n converter = conversions(\n log=log\n )\n mjd = converter.ut_datetime_to_mjd(utDatetime=\"20160426t1446\")\n print mjd\n\n # OUT: 57504.6153\n\n mjd = converter.ut_datetime_to_mjd(utDatetime=\"2016-04-26 14:44:44.234\")\n print mjd\n\n # OUT: 57504.61440\n \"\"\"\n self.log.info('starting the ``ut_datetime_to_mjd`` method')\n\n import time\n import re\n mjd = None\n\n # TRIM WHITESPACE FROM AROUND STRING\n utDatetime = utDatetime.strip()\n\n # DATETIME REGEX\n matchObject = re.match(\n r'^(?P<year>\\d{4})\\D?(?P<month>(0\\d|1[0-2]))\\D?(?P<day>([0-2]\\d|3[0-1])(\\.\\d+)?)(\\D?(?P<hours>([0-1]\\d|2[0-3]))\\D?(?P<minutes>\\d{2})(\\D?(?P<seconds>\\d{2}(\\.\\d*?)?))?)?s?$', utDatetime)\n\n # RETURN ERROR IF REGEX NOT MATCHED\n if not matchObject:\n self.log.error(\n 'UT Datetime is not in a recognised format. Input value was `%(utDatetime)s`' % locals())\n raise IOError(\n 'UT Datetime is not in a recognised format. 
Input value was `%(utDatetime)s`' % locals())\n\n year = matchObject.group(\"year\")\n month = matchObject.group(\"month\")\n day = matchObject.group(\"day\")\n hours = matchObject.group(\"hours\")\n minutes = matchObject.group(\"minutes\")\n seconds = matchObject.group(\"seconds\")\n\n # CLEAN NUMBERS AND SET OUTPUT PRECISION\n if \".\" in day:\n fhours = (float(day) - int(float(day))) * 24\n hours = int(fhours)\n fminutes = (fhours - hours) * 60\n minutes = int(fminutes)\n seconds = fhours - minutes\n precision = len(repr(day).split(\".\")[-1])\n elif not hours:\n hours = \"00\"\n minutes = \"00\"\n seconds = \"00\"\n precision = 1\n elif not seconds:\n seconds = \"00\"\n # PRECISION TO NEAREST MIN i.e. 0.000694444 DAYS\n precision = 4\n else:\n if \".\" not in seconds:\n precision = 5\n else:\n decLen = len(seconds.split(\".\")[-1])\n precision = 5 + decLen\n\n # CONVERT VALUES TO FLOAT\n seconds = float(seconds)\n year = float(year)\n month = float(month)\n day = float(day)\n hours = float(hours)\n minutes = float(minutes)\n\n # DETERMINE EXTRA TIME (SMALLER THAN A SEC)\n extraTime = 0.\n if \".\" in repr(seconds):\n extraTime = float(\".\" + repr(seconds).split(\".\")\n [-1]) / (24. * 60. * 60.)\n\n # CONVERT TO UNIXTIME THEN MJD\n t = (int(year), int(month), int(day), int(hours),\n int(minutes), int(seconds), 0, 0, 0)\n unixtime = int(time.mktime(t))\n mjd = (unixtime / 86400.0 - 2400000.5 + 2440587.5) + extraTime\n\n mjd = \"%0.*f\" % (precision, mjd)\n\n self.log.info('completed the ``ut_datetime_to_mjd`` method')\n return mjd\n",
"def mjd_to_ut_datetime(\n self,\n mjd,\n sqlDate=False,\n datetimeObject=False):\n \"\"\"*mjd to ut datetime*\n\n Precision should be respected. \n\n **Key Arguments:**\n - ``mjd`` -- time in MJD.\n - ``sqlDate`` -- add a 'T' between date and time instead of space\n - ``datetimeObject`` -- return a datetime object instead of a string. Default *False*\n\n .. todo::\n\n - replace getDateFromMJD in all code\n - replace getSQLDateFromMJD in all code\n\n **Return:**\n - ``utDatetime`` - the UT datetime in string format\n\n **Usage:**\n\n .. code-block:: python \n\n from astrocalc.times import conversions\n converter = conversions(\n log=log\n )\n utDate = converter.mjd_to_ut_datetime(\n mjd=57504.61577585013\n )\n print utDate\n\n # OUT: 2016-04-26 14:46:43.033\n\n utDate = converter.mjd_to_ut_datetime(\n mjd=57504.61577585013,\n sqlDate=True\n )\n print utDate\n\n # OUT: 2016-04-26T14:46:43.033\n \"\"\"\n self.log.info('starting the ``mjd_to_ut_datetime`` method')\n\n from datetime import datetime\n\n # CONVERT TO UNIXTIME\n unixtime = (float(mjd) + 2400000.5 - 2440587.5) * 86400.0\n theDate = datetime.utcfromtimestamp(unixtime)\n\n if datetimeObject == False:\n # DETERMINE PRECISION\n strmjd = repr(mjd)\n if \".\" not in strmjd:\n precisionUnit = \"day\"\n precision = 0\n utDatetime = theDate.strftime(\"%Y-%m-%d\")\n else:\n lenDec = len(strmjd.split(\".\")[-1])\n if lenDec < 2:\n precisionUnit = \"day\"\n precision = 0\n utDatetime = theDate.strftime(\"%Y-%m-%d\")\n elif lenDec < 3:\n precisionUnit = \"hour\"\n precision = 0\n utDatetime = theDate.strftime(\"%Y-%m-%d\")\n elif lenDec < 5:\n precisionUnit = \"minute\"\n precision = 0\n utDatetime = theDate.strftime(\"%Y-%m-%d %H:%M\")\n else:\n precisionUnit = \"second\"\n precision = lenDec - 5\n if precision > 3:\n precision = 3\n secs = float(theDate.strftime(\"%S.%f\"))\n secs = \"%02.*f\" % (precision, secs)\n utDatetime = theDate.strftime(\"%Y-%m-%d %H:%M:\") + secs\n\n if sqlDate:\n utDatetime = 
utDatetime.replace(\" \", \"T\")\n else:\n utDatetime = theDate\n\n self.log.info('completed the ``mjd_to_ut_datetime`` method')\n return utDatetime\n",
"def distance_to_redshift(\n self,\n mpc):\n \"\"\"*Convert a distance from MPC to redshift*\n\n The code works by iteratively converting a redshift to a distance, correcting itself and honing in on the true answer (within a certain precision)\n\n **Key Arguments:**\n - ``mpc`` -- distance in MPC (assumes a luminousity distance).\n\n **Return:**\n - ``redshift``\n\n .. todo::\n\n - replace convert_mpc_to_redshift in all code\n\n **Usage:**\n\n .. code-block:: python\n\n from astrocalc.distances import converter\n c = converter(log=log)\n z = c.distance_to_redshift(\n mpc=500\n )\n\n print z\n\n # OUTPUT: 0.108\n \"\"\"\n self.log.info('starting the ``distance_to_redshift`` method')\n\n lowerLimit = 0.\n upperLimit = 30.\n redshift = upperLimit - lowerLimit\n distGuess = float(self.redshift_to_distance(redshift)['dl_mpc'])\n\n distDiff = mpc - distGuess\n\n while math.fabs(distDiff) > 0.0001:\n if distGuess < mpc:\n lowerLimit = redshift\n redshift = lowerLimit + (upperLimit - lowerLimit) / 2.\n distGuess = float(\n self.redshift_to_distance(redshift)['dl_mpc'])\n elif distGuess > mpc:\n upperLimit = redshift\n redshift = lowerLimit + (upperLimit - lowerLimit) / 2.\n distGuess = float(\n self.redshift_to_distance(redshift)['dl_mpc'])\n distDiff = mpc - distGuess\n\n redshift = float(\"%5.4f\" % (redshift,))\n\n self.log.info('completed the ``distance_to_redshift`` method')\n return redshift\n",
"def redshift_to_distance(\n self,\n z,\n WM=0.3,\n WV=0.7,\n H0=70.0):\n \"\"\"*convert redshift to various distance measurements*\n\n **Key Arguments:**\n - ``z`` -- redshift measurement.\n - ``WM`` -- Omega_matter. Default *0.3*\n - ``WV`` -- Omega_vacuum. Default *0.7*\n - ``H0`` -- Hubble constant. (km s-1 Mpc-1) Default *70.0*\n\n **Return:**\n - ``results`` -- result dictionary including\n - ``dcmr_mpc`` -- co-moving radius distance\n - ``da_mpc`` -- angular distance\n - ``da_scale`` -- angular distance scale\n - ``dl_mpc`` -- luminosity distance (usually use this one)\n - ``dmod`` -- distance modulus (determined from luminosity distance)\n\n .. todo::\n\n - replace convert_redshift_to_distance in all other code\n\n **Usage:**\n\n .. code-block:: python\n\n from astrocalc.distances import converter\n c = converter(log=log)\n dists = c.redshift_to_distance(\n z=0.343\n )\n\n print \"Distance Modulus: \" + str(dists[\"dmod\"]) + \" mag\"\n print \"Luminousity Distance: \" + str(dists[\"dl_mpc\"]) + \" Mpc\"\n print \"Angular Size Scale: \" + str(dists[\"da_scale\"]) + \" kpc/arcsec\"\n print \"Angular Size Distance: \" + str(dists[\"da_mpc\"]) + \" Mpc\"\n print \"Comoving Radial Distance: \" + str(dists[\"dcmr_mpc\"]) + \" Mpc\"\n\n # OUTPUT :\n # Distance Modulus: 41.27 mag\n # Luminousity Distance: 1795.16 Mpc\n # Angular Size Scale: 4.85 kpc/arcsec\n # Angular Size Distance: 999.76 Mpc\n # Comoving Radial Distance: 1339.68 Mpc\n\n from astrocalc.distances import converter\n c = converter(log=log)\n dists = c.redshift_to_distance(\n z=0.343,\n WM=0.286,\n WV=0.714,\n H0=69.6\n )\n\n print \"Distance Modulus: \" + str(dists[\"dmod\"]) + \" mag\"\n print \"Luminousity Distance: \" + str(dists[\"dl_mpc\"]) + \" Mpc\"\n print \"Angular Size Scale: \" + str(dists[\"da_scale\"]) + \" kpc/arcsec\"\n print \"Angular Size Distance: \" + str(dists[\"da_mpc\"]) + \" Mpc\"\n print \"Comoving Radial Distance: \" + str(dists[\"dcmr_mpc\"]) + \" Mpc\"\n\n # OUTPUT :\n # 
Distance Modulus: 41.29 mag\n # Luminousity Distance: 1811.71 Mpc\n # Angular Size Scale: 4.89 kpc/arcsec\n # Angular Size Distance: 1008.97 Mpc\n # Comoving Radial Distance: 1352.03 Mpc\n\n \"\"\"\n self.log.info('starting the ``redshift_to_distance`` method')\n\n # VARIABLE\n h = H0 / 100.0\n WR = 4.165E-5 / (h * h) # Omega_radiation\n WK = 1.0 - WM - WV - WR # Omega_curvature = 1 - Omega(Total)\n c = 299792.458 # speed of light (km/s)\n\n # Arbitrarily set the values of these variables to zero just so we can\n # define them.\n DCMR = 0.0 # comoving radial distance in units of c/H0\n DCMR_Mpc = 0.0 # comoving radial distance in units of Mpc\n DA = 0.0 # angular size distance in units of c/H0\n DA_Mpc = 0.0 # angular size distance in units of Mpc\n # scale at angular size distance in units of Kpc / arcsec\n DA_scale = 0.0\n DL = 0.0 # luminosity distance in units of c/H0\n DL_Mpc = 0.0 # luminosity distance in units of Mpc\n # Distance modulus determined from luminosity distance\n DMOD = 0.0\n a = 0.0 # 1/(1+z), the scale factor of the Universe\n\n az = 1.0 / (1.0 + z) # 1/(1+z), for the given redshift\n\n # Compute the integral over a=1/(1+z) from az to 1 in n steps\n n = 1000\n for i in range(n):\n a = az + (1.0 - az) * (i + 0.5) / n\n adot = math.sqrt(WK + (WM / a) + (WR / (math.pow(a, 2)))\n + (WV * math.pow(a, 2)))\n DCMR = DCMR + 1.0 / (a * adot)\n\n # comoving radial distance in units of c/H0\n DCMR = (1.0 - az) * DCMR / n\n # comoving radial distance in units of Mpc\n DCMR_Mpc = (c / H0) * DCMR\n\n # Tangental comoving radial distance\n x = math.sqrt(abs(WK)) * DCMR\n if x > 0.1:\n if WK > 0.0:\n ratio = 0.5 * (math.exp(x) - math.exp(-x)) / x\n else:\n ratio = math.sin(x) / x\n else:\n y = math.pow(x, 2)\n if WK < 0.0:\n y = -y\n ratio = 1 + y / 6.0 + math.pow(y, 2) / 120.0\n\n DA = az * ratio * DCMR # angular size distance in units of c/H0\n DA_Mpc = (c / H0) * DA # angular size distance in units of Mpc\n # scale at angular size distance in units of Kpc / 
arcsec\n DA_scale = DA_Mpc / 206.264806\n DL = DA / math.pow(az, 2) # luminosity distance in units of c/H0\n DL_Mpc = (c / H0) * DL # luminosity distance in units of Mpc\n # Distance modulus determined from luminosity distance\n DMOD = 5 * math.log10(DL_Mpc * 1e6) - 5\n\n # FIXING PRECISIONS\n # PRECISION TEST\n precision = len(repr(z).split(\".\")[-1])\n DCMR_Mpc = \"%0.*f\" % (precision, DCMR_Mpc)\n DA_Mpc = \"%0.*f\" % (precision, DA_Mpc)\n DA_scale = \"%0.*f\" % (precision, DA_scale)\n DL_Mpc = \"%0.*f\" % (precision, DL_Mpc)\n DMOD = \"%0.*f\" % (precision, DMOD)\n z = \"%0.*f\" % (precision, z)\n\n results = \\\n {\n \"dcmr_mpc\": float(DCMR_Mpc),\n \"da_mpc\": float(DA_Mpc),\n \"da_scale\": float(DA_scale),\n \"dl_mpc\": float(DL_Mpc),\n \"dmod\": float(DMOD),\n \"z\": float(z)\n }\n\n self.log.info('completed the ``redshift_to_distance`` method')\n return results\n",
"def get_mjd(self):\n \"\"\"\n *Get the current time as an MJD*\n\n **Return:**\n - ``mjd`` -- the current MJD as a float\n\n **Usage:**\n .. todo::\n\n - add clutil\n - remove `getCurrentMJD` from all other code\n\n .. code-block:: python \n\n from astrocalc.times import now\n mjd = now(\n log=log\n ).get_mjd()\n\n \"\"\"\n self.log.info('starting the ``get_mjd`` method')\n\n jd = time.time() / 86400.0 + 2440587.5\n mjd = jd - 2400000.5\n\n self.log.info('completed the ``get_mjd`` method')\n return mjd\n",
"def get(\n self):\n \"\"\"*Calulate the angular separation between two locations on the sky*\n\n Input precision should be respected.\n\n **Key Arguments:**\n\n **Return:**\n - ``angularSeparation`` -- total angular separation between coordinates (arcsec)\n - ``north`` -- north-south separation between coordinates (arcsec)\n - ``east`` -- east-west separation between coordinates (arcsec)\n\n See main class usage for details.\n \"\"\"\n self.log.info('starting the ``get_angular_separation`` method')\n\n from astrocalc.coords import unit_conversion\n\n # CONSTANTS\n pi = (4 * math.atan(1.0))\n DEG_TO_RAD_FACTOR = pi / 180.0\n RAD_TO_DEG_FACTOR = 180.0 / pi\n\n converter = unit_conversion(\n log=self.log\n )\n dec1 = converter.dec_sexegesimal_to_decimal(\n dec=self.dec1\n )\n dec2 = converter.dec_sexegesimal_to_decimal(\n dec=self.dec2\n )\n ra1 = converter.ra_sexegesimal_to_decimal(\n ra=self.ra1\n )\n ra2 = converter.ra_sexegesimal_to_decimal(\n ra=self.ra2\n )\n\n # PRECISION TEST\n precision = 100\n vals = [dec1, dec2, ra1, ra2]\n for v in vals:\n thisLen = len(repr(v * 3600.).split(\".\")[-1])\n if thisLen < precision:\n precision = thisLen\n\n angularSeparation = None\n\n aa = (90.0 - dec1) * DEG_TO_RAD_FACTOR\n bb = (90.0 - dec2) * DEG_TO_RAD_FACTOR\n cc = (ra1 - ra2) * DEG_TO_RAD_FACTOR\n one = math.cos(aa) * math.cos(bb)\n two = math.sin(aa) * math.sin(bb) * math.cos(cc)\n\n # Because acos() returns NaN outside the ranges of -1 to +1\n # we need to check this. 
Double precision decimal places\n # can give values like 1.0000000000002 which will throw an\n # exception.\n\n three = one + two\n if (three > 1.0):\n three = 1.0\n if (three < -1.0):\n three = -1.0\n\n # BE CAREFUL WITH PRECISION PROPAGATION\n thisVal = math.acos(three)\n angularSeparation = float(thisVal) * RAD_TO_DEG_FACTOR * 3600.0\n\n # Now work out N-S, E-W separations (object 1 relative to 2)\n north = -(dec1 - dec2) * 3600.0\n east = -(ra1 - ra2) * \\\n math.cos((dec1 + dec2) * DEG_TO_RAD_FACTOR / 2.) * 3600.0\n\n angularSeparation = \"%0.*f\" % (precision, angularSeparation)\n north = \"%0.*f\" % (precision, north)\n east = \"%0.*f\" % (precision, east)\n\n self.log.info('completed the ``get_angular_separation`` method')\n return angularSeparation, north, east\n",
"def get(self):\n \"\"\"\n *translate the coordinates*\n\n **Return:**\n - ``ra`` -- the right-ascension of the translated coordinate\n - ``dec`` -- the declination of the translated coordinate\n \"\"\"\n self.log.info('starting the ``get`` method')\n\n # PRECISION TEST\n decprecision = len(repr(self.dec).split(\".\")[-1])\n raprecision = len(repr(self.ra).split(\".\")[-1])\n\n dec2 = self.dec + self.north\n\n ra2 = self.ra + \\\n ((self.east) /\n (math.cos((self.dec + dec2) * self.DEG_TO_RAD_FACTOR / 2.)))\n\n # FIX VALUES THAT CROSS RA/DEC LIMITS\n while ra2 > 360. or ra2 < 0.:\n while ra2 > 360.:\n ra2 = ra2 - 360.\n while ra2 < 0.:\n ra2 = ra2 + 360.\n while dec2 > 90. or dec2 < -90.:\n while dec2 > 90.:\n dec2 = 180. - dec2\n while dec2 < -90.:\n dec2 = -180. - dec2\n\n ra2 = \"%0.*f\" % (raprecision, ra2)\n dec2 = \"%0.*f\" % (decprecision, dec2)\n\n self.log.info('completed the ``get`` method')\n return ra2, dec2\n",
"def dec_sexegesimal_to_decimal(\n self,\n dec):\n \"\"\"\n *Convert a declination from sexegesimal format to decimal degrees.*\n\n Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).\n\n The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly:\n\n - ``+1:58:05.45341``\n - ``01:5:05``\n - ``+1 58 05.45341``\n - ``-23h53m05s``\n\n **Key Arguments:**\n - ``dec`` - DEC in sexegesimal format.\n\n **Return:**\n - ``decDeg`` -- declination converted to decimal degrees\n\n **Usage:**\n\n .. todo::\n\n - replace dryxPython declination_sexegesimal_to_decimal with this version in all my code\n - replace coords_sex_to_dec in all code\n\n .. code-block:: python\n\n from astrocalc.coords import unit_conversion\n converter = unit_conversion(\n log=log\n )\n dec = converter.dec_sexegesimal_to_decimal(\n dec=\"-23:45:21.23232\"\n )\n print dec\n\n # OUTPUT: -23.7558978667\n \"\"\"\n self.log.info(\n 'starting the ``dec_sexegesimal_to_decimal`` method')\n\n import re\n\n # TEST TO SEE IF DECIMAL DEGREES PASSED\n try:\n dec = float(dec)\n if dec > -90. and dec < 90.:\n self.log.info(\n 'declination seems to already be in decimal degrees, returning original value' % locals())\n return float(dec)\n except:\n pass\n\n # REMOVE SURROUNDING WHITESPACE\n dec = str(dec).strip()\n\n # LOOK FOR A MINUS SIGN. 
NOTE THAT -00 IS THE SAME AS 00.\n regex = re.compile(\n '^([\\+\\-]?(\\d|[0-8]\\d))\\D+([0-5]\\d)\\D+([0-6]?\\d(\\.\\d+)?)$')\n decMatch = regex.match(dec)\n\n if decMatch:\n degrees = decMatch.group(1)\n minutes = decMatch.group(3)\n seconds = decMatch.group(4)\n\n if degrees[0] == '-':\n sgn = -1\n else:\n sgn = 1\n\n degrees = abs(float(degrees))\n minutes = float(minutes)\n seconds = float(seconds)\n\n # PRECISION TEST\n # 1s = .000277778 DEGREE\n # THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 4\n decimalLen = len(repr(seconds).split(\".\")[-1])\n precision = decimalLen + 4\n\n decDeg = (degrees + (minutes / 60.0)\n + (seconds / 3600.0)) * sgn\n\n decDeg = \"%0.*f\" % (precision, decDeg)\n\n else:\n raise IOError(\n \"could not convert dec to decimal degrees, could not parse sexegesimal input. Original value was `%(dec)s`\" % locals())\n\n decDeg = float(decDeg)\n self.log.debug('decDeg: %(decDeg)s' % locals())\n self.log.info(\n 'completed the ``dec_sexegesimal_to_decimal`` method')\n\n return float(decDeg)\n",
"def ra_sexegesimal_to_decimal(\n self,\n ra\n):\n \"\"\"\n *Convert a right-ascension from sexegesimal format to decimal degrees.*\n\n Precision should be respected. If a float is passed to this method, the same float will be returned (useful if unclear which format coordinates are in).\n\n The code will attempt to read the sexegesimal value in whatever form it is passed. Any of the following should be handled correctly\n\n - ``23:45:21.23232``\n - ``23h45m21.23232s``\n - ``23 45 21.23232``\n - ``2 04 21.23232``\n - ``04:45 21``\n\n **Key Arguments:**\n - ``ra`` -- ra in sexegesimal units\n\n **Return:**\n - ``decimalDegrees``\n\n **Usage:**\n\n .. code-block:: python\n\n - replace dryxPython ra_sexegesimal_to_decimal with this version in all my code\n\n from astrocalc.coords import unit_conversion\n converter = unit_conversion(\n log=log\n )\n ra = converter.ra_sexegesimal_to_decimal(\n ra=\"04:45 21\"\n )\n print ra\n\n # OUTPUT: 71.3375\n \"\"\"\n import re\n\n # TEST TO SEE IF DECIMAL DEGREES PASSED\n try:\n ra = float(ra)\n if ra >= 0. 
and ra <= 360.:\n self.log.info(\n 'RA seems to already be in decimal degrees, returning original value' % locals())\n return float(ra)\n except:\n pass\n\n # REMOVE SURROUNDING WHITESPACE\n ra = str(ra).strip()\n\n regex = re.compile(\n '^(\\+?(\\d|[0-1]\\d|2[0-3]))\\D+([0-5]\\d)\\D+([0-6]?\\d(\\.\\d*?)?)(s)?\\s*?$')\n raMatch = regex.match(ra)\n\n if raMatch:\n degrees = raMatch.group(1)\n minutes = raMatch.group(3)\n seconds = raMatch.group(4)\n\n degrees = abs(float(degrees)) * 15.0\n minutes = float(minutes) * 15.0\n seconds = float(seconds) * 15.0\n\n # PRECISION TEST\n # 1s ARCSEC = .000018519 DEGREE\n # THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION + 5\n decimalLen = len(repr(seconds).split(\".\")[-1])\n precision = decimalLen + 5\n\n decimalDegrees = (degrees + (minutes / 60.0)\n + (seconds / 3600.0))\n\n decimalDegrees = \"%0.*f\" % (precision, decimalDegrees)\n\n else:\n raise IOError(\n \"could not convert ra to decimal degrees, could not parse sexegesimal input. Original value was `%(ra)s`\" % locals())\n\n raDeg = decimalDegrees\n self.log.debug('raDeg: %(decimalDegrees)s' % locals())\n self.log.info(\n 'completed the ``ra_sexegesimal_to_decimal`` method')\n\n return float(raDeg)\n",
"def ra_decimal_to_sexegesimal(\n self,\n ra,\n delimiter=\":\"):\n \"\"\"\n *Convert a right-ascension between decimal degrees and sexegesimal.*\n\n Precision should be respected.\n\n **Key Arguments:**\n - ``ra`` -- RA in decimal degrees. Will try and convert to float before performing calculation.\n - ``delimiter`` -- how to delimit the RA units. Default *:*\n\n **Return:**\n - ``sexegesimal`` -- ra in sexegesimal units\n\n **Usage:**\n .. todo::\n\n - replace ra_to_sex from dryxPython in all code\n\n .. code-block:: python \n\n from astrocalc.coords import unit_conversion\n converter = unit_conversion(\n log=log\n )\n ra = converter.ra_decimal_to_sexegesimal(\n ra=\"-23.454676456\",\n delimiter=\":\"\n )\n print ra\n\n # OUT: 22:26:10.87\n \"\"\"\n self.log.info('starting the ``ra_decimal_to_sexegesimal`` method')\n\n # CONVERT RA TO FLOAT\n try:\n self.log.debug(\"attempting to convert RA to float\")\n ra = float(ra)\n except Exception, e:\n self.log.error(\n \"could not convert RA to float - failed with this error: %s \" % (str(e),))\n return -1\n\n # COMPLAIN IF RA NOT BETWEEN -360 - 360\n if ra > 0. and ra < 360.:\n pass\n elif ra < 0 and ra > -360.:\n ra = 360. 
+ ra\n else:\n self.log.error(\n \"RA must be between 0 - 360 degrees\")\n return -1\n\n # PRECISION TEST\n # 1s ARCSEC = .000018519 DEGREE\n # THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 5\n decimalLen = len(repr(ra).split(\".\")[-1])\n precision = decimalLen - 5\n\n # CALCULATION FROM DECIMAL DEGREES\n import math\n ra_hh = int(ra / 15)\n ra_mm = int((ra / 15 - ra_hh) * 60)\n ra_ss = int(((ra / 15 - ra_hh) * 60 - ra_mm) * 60)\n ra_ff = ((ra / 15 - ra_hh) * 60 - ra_mm) * 60 - ra_ss\n\n # SET PRECISION\n ra_ff = repr(ra_ff)[2:]\n ra_ff = ra_ff[:precision]\n if len(ra_ff):\n ra_ff = \".\" + ra_ff\n if precision < 0:\n ra_ff = \"\"\n\n sexegesimal = '%02d' % ra_hh + delimiter + '%02d' % ra_mm + \\\n delimiter + '%02d' % ra_ss + ra_ff\n\n self.log.info('completed the ``ra_decimal_to_sexegesimal`` method')\n return sexegesimal\n",
"def dec_decimal_to_sexegesimal(\n self,\n dec,\n delimiter=\":\"):\n \"\"\"\n *Convert a declination between decimal degrees and sexegesimal.*\n\n Precision should be respected.\n\n **Key Arguments:**\n - ``dec`` -- DEC in decimal degrees. Will try and convert to float before performing calculation.\n - ``delimiter`` -- how to delimit the RA units. Default *:*\n\n **Return:**\n - ``sexegesimal`` -- ra in sexegesimal units\n\n **Usage:**\n .. todo::\n\n - replace dec_to_sex in dryxPython in all code\n\n .. code-block:: python \n\n from astrocalc.coords import unit_conversion\n converter = unit_conversion(\n log=log\n )\n dec = converter.dec_decimal_to_sexegesimal(\n dec=\"-3.454676456\",\n delimiter=\":\"\n )\n print dec\n\n # OUT: -03:27:16.8\n \"\"\"\n self.log.info('starting the ``dec_decimal_to_sexegesimal`` method')\n\n import math\n\n # CONVERT DEC TO FLOAT\n try:\n self.log.debug(\"attempting to convert RA to float\")\n dec = float(dec)\n except Exception, e:\n self.log.error(\n \"could not convert RA to float - failed with this error: %s \" % (str(e),))\n return -1\n\n # COMPLAIN IF DEC NOT BETWEEN -90 - 90\n if dec > -90. 
and dec < 90.:\n pass\n else:\n self.log.error(\n \"DEC must be between -90 - 90 degrees\")\n return -1\n\n if (dec >= 0):\n hemisphere = '+'\n else:\n hemisphere = '-'\n dec *= -1\n\n # PRECISION TEST\n # 1s = .000277778 DEGREE\n # THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 4\n decimalLen = len(repr(dec).split(\".\")[-1])\n precision = decimalLen - 4\n\n dec_deg = int(dec)\n dec_mm = int((dec - dec_deg) * 60)\n dec_ss = int(((dec - dec_deg) * 60 - dec_mm) * 60)\n dec_f = (((dec - dec_deg) * 60 - dec_mm) * 60) - dec_ss\n\n # SET PRECISION\n dec_f = repr(dec_f)[2:]\n dec_f = dec_f[:precision]\n if len(dec_f):\n dec_f = \".\" + dec_f\n if precision < 0:\n dec_f = \"\"\n\n sexegesimal = hemisphere + '%02d' % dec_deg + delimiter + \\\n '%02d' % dec_mm + delimiter + '%02d' % dec_ss + dec_f\n\n self.log.info('completed the ``dec_decimal_to_sexegesimal`` method')\n return sexegesimal\n",
"def ra_dec_to_cartesian(\n self,\n ra,\n dec):\n \"\"\"*Convert an RA, DEC coordinate set to x, y, z cartesian coordinates*\n\n **Key Arguments:**\n - ``ra`` -- right ascension in sexegesimal or decimal degress.\n - ``dec`` -- declination in sexegesimal or decimal degress.\n\n **Return:**\n - ``cartesians`` -- tuple of (x, y, z) coordinates\n\n .. todo::\n\n - replace calculate_cartesians in all code\n\n **Usage:**\n\n .. code-block:: python \n\n from astrocalc.coords import unit_conversion\n converter = unit_conversion(\n log=log\n )\n x, y, z = converter.ra_dec_to_cartesian(\n ra=\"23 45 21.23232\",\n dec=\"+01:58:5.45341\"\n )\n print x, y, z\n\n # OUTPUT: 0.9973699780687104, -0.06382462462791459, 0.034344492110465606\n \"\"\"\n self.log.info('starting the ``ra_dec_to_cartesian`` method')\n\n ra = self.ra_sexegesimal_to_decimal(\n ra=ra\n )\n dec = self.dec_sexegesimal_to_decimal(\n dec=dec\n )\n\n ra = math.radians(ra)\n dec = math.radians(dec)\n cos_dec = math.cos(dec)\n cx = math.cos(ra) * cos_dec\n cy = math.sin(ra) * cos_dec\n cz = math.sin(dec)\n\n cartesians = (cx, cy, cz)\n\n self.log.info('completed the ``ra_dec_to_cartesian`` method')\n return cartesians\n"
] |
#!/usr/local/bin/python
# encoding: utf-8
"""
Documentation for astrocalc can be found here: http://astrocalc.readthedocs.org/en/stable
Usage:
astrocalc [-c] coordflip <ra> <dec>
astrocalc sep <ra1> <dec1> <ra2> <dec2>
astrocalc timeflip <datetime>
astrocalc trans <ra> <dec> <north> <east>
astrocalc now mjd
astrocalc dist (-z | -m) <distVal> [--hc=hVal --wm=OmegaMatter --wv=OmegaVacuum]
COMMANDS:
========
coordflip flip coordinates between decimal degrees and sexegesimal and vice-versa
sep calculate the separation between two locations in the sky.
timeflip flip time between UT and MJD.
trans translate a location across the sky (north and east in arcsec)
now report current time in various formats
dist convert distance between mpc to z
VARIABLES:
==========
ra, ra1, ra2 right-ascension in decimal degrees or sexegesimal format
dec, dec1, dec2 declination in decimal degrees or sexegesimal format
datetime modified julian date (mjd) or universal time (UT). UT can be formatted 20150415113334.343 or "20150415 11:33:34.343" (spaces require quotes)
north, east vector components in arcsec
distVal a distance value in Mpc (-mpc) or redshift (-z)
hVal hubble constant value. Default=70 km/s/Mpc
OmegaMatter Omega Matter. Default=0.3
OmegaVacuum Omega Vacuum. Default=0.7
-h, --help show this help message
-m, --mpc distance in mpc
-z, --redshift redshift distance
-c, --cartesian convert to cartesian coordinates
"""
################# GLOBAL IMPORTS ####################
import sys
import os
os.environ['TERM'] = 'vt100'
import readline
import glob
import pickle
from docopt import docopt
from fundamentals import tools, times
from astrocalc.coords import unit_conversion
# from ..__init__ import *
def tab_complete(text, state):
return (glob.glob(text + '*') + [None])[state]
if __name__ == '__main__':
main()
|
thespacedoctor/astrocalc
|
astrocalc/coords/coordinates_to_array.py
|
coordinates_to_array
|
python
|
def coordinates_to_array(
log,
ra,
dec):
log.info('starting the ``coordinates_to_array`` function')
if isinstance(ra, np.ndarray) and isinstance(dec, np.ndarray):
return ra, dec
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=log
)
# CONVERT RA AND DEC TO NUMPY ARRAYS
if isinstance(ra, float):
pass
elif isinstance(ra, str):
try:
ra = float(ra)
except:
ra = converter.ra_sexegesimal_to_decimal(ra=ra)
elif isinstance(ra, list):
try:
ra = np.array(ra).astype(np.float)
except:
raList = []
raList[:] = [converter.ra_sexegesimal_to_decimal(ra=r) for r in ra]
ra = raList
if isinstance(dec, float):
pass
elif isinstance(dec, str):
try:
dec = float(dec)
except:
dec = converter.dec_sexegesimal_to_decimal(dec=dec)
elif isinstance(dec, list):
try:
dec = np.array(dec).astype(np.float)
except:
decList = []
decList[:] = [
converter.dec_sexegesimal_to_decimal(dec=d) for d in dec]
dec = decList
raArray = np.array(ra, dtype='f8', ndmin=1, copy=False)
decArray = np.array(dec, dtype='f8', ndmin=1, copy=False)
log.info('completed the ``coordinates_to_array`` function')
return raArray, decArray
|
*Convert a single value RA, DEC or list of RA and DEC to numpy arrays*
**Key Arguments:**
- ``ra`` -- list, numpy array or single ra value
- ``dec`` --list, numpy array or single dec value
- ``log`` -- logger
**Return:**
- ``raArray`` -- input RAs as a numpy array of decimal degree values
- ``decArray`` -- input DECs as a numpy array of decimal degree values
**Usage:**
.. todo::
add usage info
create a sublime snippet for usage
.. code-block:: python
ra, dec = coordinates_to_array(
log=log,
ra=ra,
dec=dec
)
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/coords/coordinates_to_array.py#L21-L95
| null |
#!/usr/local/bin/python
# encoding: utf-8
"""
*Convert single values of RA, DEC or list of RA and DEC to numpy arrays*
:Author:
David Young
:Date Created:
October 6, 2016
"""
################# GLOBAL IMPORTS ####################
import sys
import os
os.environ['TERM'] = 'vt100'
from fundamentals import tools
import numpy as np
from astrocalc.coords import unit_conversion
|
thespacedoctor/astrocalc
|
astrocalc/coords/translate.py
|
translate.get
|
python
|
def get(self):
self.log.info('starting the ``get`` method')
# PRECISION TEST
decprecision = len(repr(self.dec).split(".")[-1])
raprecision = len(repr(self.ra).split(".")[-1])
dec2 = self.dec + self.north
ra2 = self.ra + \
((self.east) /
(math.cos((self.dec + dec2) * self.DEG_TO_RAD_FACTOR / 2.)))
# FIX VALUES THAT CROSS RA/DEC LIMITS
while ra2 > 360. or ra2 < 0.:
while ra2 > 360.:
ra2 = ra2 - 360.
while ra2 < 0.:
ra2 = ra2 + 360.
while dec2 > 90. or dec2 < -90.:
while dec2 > 90.:
dec2 = 180. - dec2
while dec2 < -90.:
dec2 = -180. - dec2
ra2 = "%0.*f" % (raprecision, ra2)
dec2 = "%0.*f" % (decprecision, dec2)
self.log.info('completed the ``get`` method')
return ra2, dec2
|
*translate the coordinates*
**Return:**
- ``ra`` -- the right-ascension of the translated coordinate
- ``dec`` -- the declination of the translated coordinate
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/coords/translate.py#L93-L129
| null |
class translate():
"""
*Translate a set of coordinates north and east by distances given in arcsecs*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary. Default *False*
- ``ra`` -- ra (decimal or sexegesimal)
- ``dec`` -- dec (decimal or sexegesimal)
- ``northArcsec`` -- number of arcsecs to move location to the north
- ``eastArcsec`` -- number of arcsecs to move location to the east
.. todo::
- replace shift_coordinates class in all other code
**Usage:**
To shift a set of coordinates north and east by given distances:
.. code-block:: python
# TRANSLATE COORDINATES ACROSS SKY
from astrocalc.coords import translate
ra, dec = translate(
log=log,
settings=settings,
ra="14.546438",
dec="-45.34232334",
northArcsec=4560,
eastArcsec=+967800
).get()
"""
# Initialisation
def __init__(
self,
log,
ra,
dec,
northArcsec,
eastArcsec,
settings=False,
):
self.log = log
log.debug("instansiating a new 'translate' object")
self.settings = settings
self.ra = ra
self.dec = dec
self.north = northArcsec / 3600.
self.east = eastArcsec / 3600.
# xt-self-arg-tmpx
# CONSTANTS
self.pi = (4 * math.atan(1.0))
self.DEG_TO_RAD_FACTOR = self.pi / 180.0
self.RAD_TO_DEG_FACTOR = 180.0 / self.pi
# INITIAL ACTIONS
# CONVERT RA AND DEC INTO DECIMAL DEGREES
converter = unit_conversion(
log=log
)
self.ra = converter.ra_sexegesimal_to_decimal(
ra=self.ra
)
self.dec = converter.dec_sexegesimal_to_decimal(
dec=self.dec
)
return None
|
thespacedoctor/astrocalc
|
astrocalc/times/conversions.py
|
conversions.get
|
python
|
def get(self):
self.log.info('starting the ``get`` method')
conversions = None
self.log.info('completed the ``get`` method')
return conversions
|
*get the conversions object*
**Return:**
- ``conversions``
.. todo::
- @review: when complete, clean get method
- @review: when complete add logging
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/times/conversions.py#L55-L72
| null |
class conversions():
"""
*The worker class for the conversions module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- add mjd_to_date
- add decimal_day_to_day_hour_min_sec
- add date_to_mjd
- convert all functions in __init__ to modules
.. code-block:: python
usage code
"""
# Initialisation
def __init__(
self,
log,
settings=False,
):
self.log = log
log.debug("instansiating a new 'conversions' object")
self.settings = settings
# xt-self-arg-tmpx
return None
def ut_datetime_to_mjd(
self,
utDatetime):
"""*ut datetime to mjd*
If the date given has no time associated with it (e.g. ``20160426``), then the datetime assumed is ``20160426 00:00:00.0``.
Precision should be respected.
**Key Arguments:**
- ``utDatetime`` -- UT datetime. Can accept various formats e.g. ``201604261444``, ``20160426``, ``20160426144444.5452``, ``2016-04-26 14:44:44.234``, ``20160426 14h44m44.432s``
**Return:**
- ``mjd`` -- the MJD
.. todo ::
- replace getMJDFromSqlDate in all code
**Usage:**
.. code-block:: python
from astrocalc.times import conversions
converter = conversions(
log=log
)
mjd = converter.ut_datetime_to_mjd(utDatetime="20160426t1446")
print mjd
# OUT: 57504.6153
mjd = converter.ut_datetime_to_mjd(utDatetime="2016-04-26 14:44:44.234")
print mjd
# OUT: 57504.61440
"""
self.log.info('starting the ``ut_datetime_to_mjd`` method')
import time
import re
mjd = None
# TRIM WHITESPACE FROM AROUND STRING
utDatetime = utDatetime.strip()
# DATETIME REGEX
matchObject = re.match(
r'^(?P<year>\d{4})\D?(?P<month>(0\d|1[0-2]))\D?(?P<day>([0-2]\d|3[0-1])(\.\d+)?)(\D?(?P<hours>([0-1]\d|2[0-3]))\D?(?P<minutes>\d{2})(\D?(?P<seconds>\d{2}(\.\d*?)?))?)?s?$', utDatetime)
# RETURN ERROR IF REGEX NOT MATCHED
if not matchObject:
self.log.error(
'UT Datetime is not in a recognised format. Input value was `%(utDatetime)s`' % locals())
raise IOError(
'UT Datetime is not in a recognised format. Input value was `%(utDatetime)s`' % locals())
year = matchObject.group("year")
month = matchObject.group("month")
day = matchObject.group("day")
hours = matchObject.group("hours")
minutes = matchObject.group("minutes")
seconds = matchObject.group("seconds")
# CLEAN NUMBERS AND SET OUTPUT PRECISION
if "." in day:
fhours = (float(day) - int(float(day))) * 24
hours = int(fhours)
fminutes = (fhours - hours) * 60
minutes = int(fminutes)
seconds = fhours - minutes
precision = len(repr(day).split(".")[-1])
elif not hours:
hours = "00"
minutes = "00"
seconds = "00"
precision = 1
elif not seconds:
seconds = "00"
# PRECISION TO NEAREST MIN i.e. 0.000694444 DAYS
precision = 4
else:
if "." not in seconds:
precision = 5
else:
decLen = len(seconds.split(".")[-1])
precision = 5 + decLen
# CONVERT VALUES TO FLOAT
seconds = float(seconds)
year = float(year)
month = float(month)
day = float(day)
hours = float(hours)
minutes = float(minutes)
# DETERMINE EXTRA TIME (SMALLER THAN A SEC)
extraTime = 0.
if "." in repr(seconds):
extraTime = float("." + repr(seconds).split(".")
[-1]) / (24. * 60. * 60.)
# CONVERT TO UNIXTIME THEN MJD
t = (int(year), int(month), int(day), int(hours),
int(minutes), int(seconds), 0, 0, 0)
unixtime = int(time.mktime(t))
mjd = (unixtime / 86400.0 - 2400000.5 + 2440587.5) + extraTime
mjd = "%0.*f" % (precision, mjd)
self.log.info('completed the ``ut_datetime_to_mjd`` method')
return mjd
def mjd_to_ut_datetime(
self,
mjd,
sqlDate=False,
datetimeObject=False):
"""*mjd to ut datetime*
Precision should be respected.
**Key Arguments:**
- ``mjd`` -- time in MJD.
- ``sqlDate`` -- add a 'T' between date and time instead of space
- ``datetimeObject`` -- return a datetime object instead of a string. Default *False*
.. todo::
- replace getDateFromMJD in all code
- replace getSQLDateFromMJD in all code
**Return:**
- ``utDatetime`` - the UT datetime in string format
**Usage:**
.. code-block:: python
from astrocalc.times import conversions
converter = conversions(
log=log
)
utDate = converter.mjd_to_ut_datetime(
mjd=57504.61577585013
)
print utDate
# OUT: 2016-04-26 14:46:43.033
utDate = converter.mjd_to_ut_datetime(
mjd=57504.61577585013,
sqlDate=True
)
print utDate
# OUT: 2016-04-26T14:46:43.033
"""
self.log.info('starting the ``mjd_to_ut_datetime`` method')
from datetime import datetime
# CONVERT TO UNIXTIME
unixtime = (float(mjd) + 2400000.5 - 2440587.5) * 86400.0
theDate = datetime.utcfromtimestamp(unixtime)
if datetimeObject == False:
# DETERMINE PRECISION
strmjd = repr(mjd)
if "." not in strmjd:
precisionUnit = "day"
precision = 0
utDatetime = theDate.strftime("%Y-%m-%d")
else:
lenDec = len(strmjd.split(".")[-1])
if lenDec < 2:
precisionUnit = "day"
precision = 0
utDatetime = theDate.strftime("%Y-%m-%d")
elif lenDec < 3:
precisionUnit = "hour"
precision = 0
utDatetime = theDate.strftime("%Y-%m-%d")
elif lenDec < 5:
precisionUnit = "minute"
precision = 0
utDatetime = theDate.strftime("%Y-%m-%d %H:%M")
else:
precisionUnit = "second"
precision = lenDec - 5
if precision > 3:
precision = 3
secs = float(theDate.strftime("%S.%f"))
secs = "%02.*f" % (precision, secs)
utDatetime = theDate.strftime("%Y-%m-%d %H:%M:") + secs
if sqlDate:
utDatetime = utDatetime.replace(" ", "T")
else:
utDatetime = theDate
self.log.info('completed the ``mjd_to_ut_datetime`` method')
return utDatetime
def decimal_day_to_day_hour_min_sec(
self,
daysFloat):
"""*Convert a day from decimal format to hours mins and sec*
Precision should be respected.
**Key Arguments:**
- ``daysFloat`` -- the day as a decimal.
**Return:**
- ``daysInt`` -- day as an integer
- ``hoursInt`` -- hour as an integer (None if input precsion too low)
- ``minsInt`` -- mins as an integer (None if input precsion too low)
- ``secFloat`` -- secs as a float (None if input precsion too low)
**Usage:**
.. todo::
- replace `decimal_day_to_day_hour_min_sec` in all other code
.. code-block:: python
from astrocalc.times import conversions
converter = conversions(
log=log
)
daysInt, hoursInt, minsInt, secFloat = converter.decimal_day_to_day_hour_min_sec(
daysFloat=24.2453
)
print daysInt, hoursInt, minsInt, secFloat
# OUTPUT: 24, 5, 53, None
daysInt, hoursInt, minsInt, secFloat = converter.decimal_day_to_day_hour_min_sec(
daysFloat=24.1232435454
)
print "%(daysInt)s days, %(hoursInt)s hours, %(minsInt)s mins, %(secFloat)s sec" % locals()
# OUTPUT: 24 days, 2 hours, 57 mins, 28.242 sec
"""
self.log.info(
'starting the ``decimal_day_to_day_hour_min_sec`` method')
daysInt = int(daysFloat)
hoursFloat = (daysFloat - daysInt) * 24.
hoursInt = int(hoursFloat)
minsFloat = (hoursFloat - hoursInt) * 60.
minsInt = int(minsFloat)
secFloat = (minsFloat - minsInt) * 60.
# DETERMINE PRECISION
strday = repr(daysFloat)
if "." not in strday:
precisionUnit = "day"
precision = 0
hoursInt = None
minsInt = None
secFloat = None
else:
lenDec = len(strday.split(".")[-1])
if lenDec < 2:
precisionUnit = "day"
precision = 0
hoursInt = None
minsInt = None
secFloat = None
elif lenDec < 3:
precisionUnit = "hour"
precision = 0
minsInt = None
secFloat = None
elif lenDec < 5:
precisionUnit = "minute"
precision = 0
secFloat = None
else:
precisionUnit = "second"
precision = lenDec - 5
if precision > 3:
precision = 3
secFloat = "%02.*f" % (precision, secFloat)
self.log.info(
'completed the ``decimal_day_to_day_hour_min_sec`` method')
return daysInt, hoursInt, minsInt, secFloat
|
thespacedoctor/astrocalc
|
astrocalc/times/conversions.py
|
conversions.ut_datetime_to_mjd
|
python
|
def ut_datetime_to_mjd(
self,
utDatetime):
self.log.info('starting the ``ut_datetime_to_mjd`` method')
import time
import re
mjd = None
# TRIM WHITESPACE FROM AROUND STRING
utDatetime = utDatetime.strip()
# DATETIME REGEX
matchObject = re.match(
r'^(?P<year>\d{4})\D?(?P<month>(0\d|1[0-2]))\D?(?P<day>([0-2]\d|3[0-1])(\.\d+)?)(\D?(?P<hours>([0-1]\d|2[0-3]))\D?(?P<minutes>\d{2})(\D?(?P<seconds>\d{2}(\.\d*?)?))?)?s?$', utDatetime)
# RETURN ERROR IF REGEX NOT MATCHED
if not matchObject:
self.log.error(
'UT Datetime is not in a recognised format. Input value was `%(utDatetime)s`' % locals())
raise IOError(
'UT Datetime is not in a recognised format. Input value was `%(utDatetime)s`' % locals())
year = matchObject.group("year")
month = matchObject.group("month")
day = matchObject.group("day")
hours = matchObject.group("hours")
minutes = matchObject.group("minutes")
seconds = matchObject.group("seconds")
# CLEAN NUMBERS AND SET OUTPUT PRECISION
if "." in day:
fhours = (float(day) - int(float(day))) * 24
hours = int(fhours)
fminutes = (fhours - hours) * 60
minutes = int(fminutes)
seconds = fhours - minutes
precision = len(repr(day).split(".")[-1])
elif not hours:
hours = "00"
minutes = "00"
seconds = "00"
precision = 1
elif not seconds:
seconds = "00"
# PRECISION TO NEAREST MIN i.e. 0.000694444 DAYS
precision = 4
else:
if "." not in seconds:
precision = 5
else:
decLen = len(seconds.split(".")[-1])
precision = 5 + decLen
# CONVERT VALUES TO FLOAT
seconds = float(seconds)
year = float(year)
month = float(month)
day = float(day)
hours = float(hours)
minutes = float(minutes)
# DETERMINE EXTRA TIME (SMALLER THAN A SEC)
extraTime = 0.
if "." in repr(seconds):
extraTime = float("." + repr(seconds).split(".")
[-1]) / (24. * 60. * 60.)
# CONVERT TO UNIXTIME THEN MJD
t = (int(year), int(month), int(day), int(hours),
int(minutes), int(seconds), 0, 0, 0)
unixtime = int(time.mktime(t))
mjd = (unixtime / 86400.0 - 2400000.5 + 2440587.5) + extraTime
mjd = "%0.*f" % (precision, mjd)
self.log.info('completed the ``ut_datetime_to_mjd`` method')
return mjd
|
*ut datetime to mjd*
If the date given has no time associated with it (e.g. ``20160426``), then the datetime assumed is ``20160426 00:00:00.0``.
Precision should be respected.
**Key Arguments:**
- ``utDatetime`` -- UT datetime. Can accept various formats e.g. ``201604261444``, ``20160426``, ``20160426144444.5452``, ``2016-04-26 14:44:44.234``, ``20160426 14h44m44.432s``
**Return:**
- ``mjd`` -- the MJD
.. todo ::
- replace getMJDFromSqlDate in all code
**Usage:**
.. code-block:: python
from astrocalc.times import conversions
converter = conversions(
log=log
)
mjd = converter.ut_datetime_to_mjd(utDatetime="20160426t1446")
print mjd
# OUT: 57504.6153
mjd = converter.ut_datetime_to_mjd(utDatetime="2016-04-26 14:44:44.234")
print mjd
# OUT: 57504.61440
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/times/conversions.py#L74-L185
| null |
class conversions():
"""
*The worker class for the conversions module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- add mjd_to_date
- add decimal_day_to_day_hour_min_sec
- add date_to_mjd
- convert all functions in __init__ to modules
.. code-block:: python
usage code
"""
# Initialisation
def __init__(
self,
log,
settings=False,
):
self.log = log
log.debug("instansiating a new 'conversions' object")
self.settings = settings
# xt-self-arg-tmpx
return None
def get(self):
"""
*get the conversions object*
**Return:**
- ``conversions``
.. todo::
- @review: when complete, clean get method
- @review: when complete add logging
"""
self.log.info('starting the ``get`` method')
conversions = None
self.log.info('completed the ``get`` method')
return conversions
def mjd_to_ut_datetime(
self,
mjd,
sqlDate=False,
datetimeObject=False):
"""*mjd to ut datetime*
Precision should be respected.
**Key Arguments:**
- ``mjd`` -- time in MJD.
- ``sqlDate`` -- add a 'T' between date and time instead of space
- ``datetimeObject`` -- return a datetime object instead of a string. Default *False*
.. todo::
- replace getDateFromMJD in all code
- replace getSQLDateFromMJD in all code
**Return:**
- ``utDatetime`` - the UT datetime in string format
**Usage:**
.. code-block:: python
from astrocalc.times import conversions
converter = conversions(
log=log
)
utDate = converter.mjd_to_ut_datetime(
mjd=57504.61577585013
)
print utDate
# OUT: 2016-04-26 14:46:43.033
utDate = converter.mjd_to_ut_datetime(
mjd=57504.61577585013,
sqlDate=True
)
print utDate
# OUT: 2016-04-26T14:46:43.033
"""
self.log.info('starting the ``mjd_to_ut_datetime`` method')
from datetime import datetime
# CONVERT TO UNIXTIME
unixtime = (float(mjd) + 2400000.5 - 2440587.5) * 86400.0
theDate = datetime.utcfromtimestamp(unixtime)
if datetimeObject == False:
# DETERMINE PRECISION
strmjd = repr(mjd)
if "." not in strmjd:
precisionUnit = "day"
precision = 0
utDatetime = theDate.strftime("%Y-%m-%d")
else:
lenDec = len(strmjd.split(".")[-1])
if lenDec < 2:
precisionUnit = "day"
precision = 0
utDatetime = theDate.strftime("%Y-%m-%d")
elif lenDec < 3:
precisionUnit = "hour"
precision = 0
utDatetime = theDate.strftime("%Y-%m-%d")
elif lenDec < 5:
precisionUnit = "minute"
precision = 0
utDatetime = theDate.strftime("%Y-%m-%d %H:%M")
else:
precisionUnit = "second"
precision = lenDec - 5
if precision > 3:
precision = 3
secs = float(theDate.strftime("%S.%f"))
secs = "%02.*f" % (precision, secs)
utDatetime = theDate.strftime("%Y-%m-%d %H:%M:") + secs
if sqlDate:
utDatetime = utDatetime.replace(" ", "T")
else:
utDatetime = theDate
self.log.info('completed the ``mjd_to_ut_datetime`` method')
return utDatetime
def decimal_day_to_day_hour_min_sec(
self,
daysFloat):
"""*Convert a day from decimal format to hours mins and sec*
Precision should be respected.
**Key Arguments:**
- ``daysFloat`` -- the day as a decimal.
**Return:**
- ``daysInt`` -- day as an integer
- ``hoursInt`` -- hour as an integer (None if input precsion too low)
- ``minsInt`` -- mins as an integer (None if input precsion too low)
- ``secFloat`` -- secs as a float (None if input precsion too low)
**Usage:**
.. todo::
- replace `decimal_day_to_day_hour_min_sec` in all other code
.. code-block:: python
from astrocalc.times import conversions
converter = conversions(
log=log
)
daysInt, hoursInt, minsInt, secFloat = converter.decimal_day_to_day_hour_min_sec(
daysFloat=24.2453
)
print daysInt, hoursInt, minsInt, secFloat
# OUTPUT: 24, 5, 53, None
daysInt, hoursInt, minsInt, secFloat = converter.decimal_day_to_day_hour_min_sec(
daysFloat=24.1232435454
)
print "%(daysInt)s days, %(hoursInt)s hours, %(minsInt)s mins, %(secFloat)s sec" % locals()
# OUTPUT: 24 days, 2 hours, 57 mins, 28.242 sec
"""
self.log.info(
'starting the ``decimal_day_to_day_hour_min_sec`` method')
daysInt = int(daysFloat)
hoursFloat = (daysFloat - daysInt) * 24.
hoursInt = int(hoursFloat)
minsFloat = (hoursFloat - hoursInt) * 60.
minsInt = int(minsFloat)
secFloat = (minsFloat - minsInt) * 60.
# DETERMINE PRECISION
strday = repr(daysFloat)
if "." not in strday:
precisionUnit = "day"
precision = 0
hoursInt = None
minsInt = None
secFloat = None
else:
lenDec = len(strday.split(".")[-1])
if lenDec < 2:
precisionUnit = "day"
precision = 0
hoursInt = None
minsInt = None
secFloat = None
elif lenDec < 3:
precisionUnit = "hour"
precision = 0
minsInt = None
secFloat = None
elif lenDec < 5:
precisionUnit = "minute"
precision = 0
secFloat = None
else:
precisionUnit = "second"
precision = lenDec - 5
if precision > 3:
precision = 3
secFloat = "%02.*f" % (precision, secFloat)
self.log.info(
'completed the ``decimal_day_to_day_hour_min_sec`` method')
return daysInt, hoursInt, minsInt, secFloat
|
thespacedoctor/astrocalc
|
astrocalc/times/conversions.py
|
conversions.mjd_to_ut_datetime
|
python
|
def mjd_to_ut_datetime(
self,
mjd,
sqlDate=False,
datetimeObject=False):
self.log.info('starting the ``mjd_to_ut_datetime`` method')
from datetime import datetime
# CONVERT TO UNIXTIME
unixtime = (float(mjd) + 2400000.5 - 2440587.5) * 86400.0
theDate = datetime.utcfromtimestamp(unixtime)
if datetimeObject == False:
# DETERMINE PRECISION
strmjd = repr(mjd)
if "." not in strmjd:
precisionUnit = "day"
precision = 0
utDatetime = theDate.strftime("%Y-%m-%d")
else:
lenDec = len(strmjd.split(".")[-1])
if lenDec < 2:
precisionUnit = "day"
precision = 0
utDatetime = theDate.strftime("%Y-%m-%d")
elif lenDec < 3:
precisionUnit = "hour"
precision = 0
utDatetime = theDate.strftime("%Y-%m-%d")
elif lenDec < 5:
precisionUnit = "minute"
precision = 0
utDatetime = theDate.strftime("%Y-%m-%d %H:%M")
else:
precisionUnit = "second"
precision = lenDec - 5
if precision > 3:
precision = 3
secs = float(theDate.strftime("%S.%f"))
secs = "%02.*f" % (precision, secs)
utDatetime = theDate.strftime("%Y-%m-%d %H:%M:") + secs
if sqlDate:
utDatetime = utDatetime.replace(" ", "T")
else:
utDatetime = theDate
self.log.info('completed the ``mjd_to_ut_datetime`` method')
return utDatetime
|
*mjd to ut datetime*
Precision should be respected.
**Key Arguments:**
- ``mjd`` -- time in MJD.
- ``sqlDate`` -- add a 'T' between date and time instead of space
- ``datetimeObject`` -- return a datetime object instead of a string. Default *False*
.. todo::
- replace getDateFromMJD in all code
- replace getSQLDateFromMJD in all code
**Return:**
- ``utDatetime`` - the UT datetime in string format
**Usage:**
.. code-block:: python
from astrocalc.times import conversions
converter = conversions(
log=log
)
utDate = converter.mjd_to_ut_datetime(
mjd=57504.61577585013
)
print utDate
# OUT: 2016-04-26 14:46:43.033
utDate = converter.mjd_to_ut_datetime(
mjd=57504.61577585013,
sqlDate=True
)
print utDate
# OUT: 2016-04-26T14:46:43.033
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/times/conversions.py#L187-L276
| null |
class conversions():
"""
*The worker class for the conversions module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- add mjd_to_date
- add decimal_day_to_day_hour_min_sec
- add date_to_mjd
- convert all functions in __init__ to modules
.. code-block:: python
usage code
"""
# Initialisation
def __init__(
self,
log,
settings=False,
):
self.log = log
log.debug("instansiating a new 'conversions' object")
self.settings = settings
# xt-self-arg-tmpx
return None
def get(self):
"""
*get the conversions object*
**Return:**
- ``conversions``
.. todo::
- @review: when complete, clean get method
- @review: when complete add logging
"""
self.log.info('starting the ``get`` method')
conversions = None
self.log.info('completed the ``get`` method')
return conversions
def ut_datetime_to_mjd(
self,
utDatetime):
"""*ut datetime to mjd*
If the date given has no time associated with it (e.g. ``20160426``), then the datetime assumed is ``20160426 00:00:00.0``.
Precision should be respected.
**Key Arguments:**
- ``utDatetime`` -- UT datetime. Can accept various formats e.g. ``201604261444``, ``20160426``, ``20160426144444.5452``, ``2016-04-26 14:44:44.234``, ``20160426 14h44m44.432s``
**Return:**
- ``mjd`` -- the MJD
.. todo ::
- replace getMJDFromSqlDate in all code
**Usage:**
.. code-block:: python
from astrocalc.times import conversions
converter = conversions(
log=log
)
mjd = converter.ut_datetime_to_mjd(utDatetime="20160426t1446")
print mjd
# OUT: 57504.6153
mjd = converter.ut_datetime_to_mjd(utDatetime="2016-04-26 14:44:44.234")
print mjd
# OUT: 57504.61440
"""
self.log.info('starting the ``ut_datetime_to_mjd`` method')
import time
import re
mjd = None
# TRIM WHITESPACE FROM AROUND STRING
utDatetime = utDatetime.strip()
# DATETIME REGEX
matchObject = re.match(
r'^(?P<year>\d{4})\D?(?P<month>(0\d|1[0-2]))\D?(?P<day>([0-2]\d|3[0-1])(\.\d+)?)(\D?(?P<hours>([0-1]\d|2[0-3]))\D?(?P<minutes>\d{2})(\D?(?P<seconds>\d{2}(\.\d*?)?))?)?s?$', utDatetime)
# RETURN ERROR IF REGEX NOT MATCHED
if not matchObject:
self.log.error(
'UT Datetime is not in a recognised format. Input value was `%(utDatetime)s`' % locals())
raise IOError(
'UT Datetime is not in a recognised format. Input value was `%(utDatetime)s`' % locals())
year = matchObject.group("year")
month = matchObject.group("month")
day = matchObject.group("day")
hours = matchObject.group("hours")
minutes = matchObject.group("minutes")
seconds = matchObject.group("seconds")
# CLEAN NUMBERS AND SET OUTPUT PRECISION
if "." in day:
fhours = (float(day) - int(float(day))) * 24
hours = int(fhours)
fminutes = (fhours - hours) * 60
minutes = int(fminutes)
seconds = fhours - minutes
precision = len(repr(day).split(".")[-1])
elif not hours:
hours = "00"
minutes = "00"
seconds = "00"
precision = 1
elif not seconds:
seconds = "00"
# PRECISION TO NEAREST MIN i.e. 0.000694444 DAYS
precision = 4
else:
if "." not in seconds:
precision = 5
else:
decLen = len(seconds.split(".")[-1])
precision = 5 + decLen
# CONVERT VALUES TO FLOAT
seconds = float(seconds)
year = float(year)
month = float(month)
day = float(day)
hours = float(hours)
minutes = float(minutes)
# DETERMINE EXTRA TIME (SMALLER THAN A SEC)
extraTime = 0.
if "." in repr(seconds):
extraTime = float("." + repr(seconds).split(".")
[-1]) / (24. * 60. * 60.)
# CONVERT TO UNIXTIME THEN MJD
t = (int(year), int(month), int(day), int(hours),
int(minutes), int(seconds), 0, 0, 0)
unixtime = int(time.mktime(t))
mjd = (unixtime / 86400.0 - 2400000.5 + 2440587.5) + extraTime
mjd = "%0.*f" % (precision, mjd)
self.log.info('completed the ``ut_datetime_to_mjd`` method')
return mjd
def decimal_day_to_day_hour_min_sec(
self,
daysFloat):
"""*Convert a day from decimal format to hours mins and sec*
Precision should be respected.
**Key Arguments:**
- ``daysFloat`` -- the day as a decimal.
**Return:**
- ``daysInt`` -- day as an integer
- ``hoursInt`` -- hour as an integer (None if input precsion too low)
- ``minsInt`` -- mins as an integer (None if input precsion too low)
- ``secFloat`` -- secs as a float (None if input precsion too low)
**Usage:**
.. todo::
- replace `decimal_day_to_day_hour_min_sec` in all other code
.. code-block:: python
from astrocalc.times import conversions
converter = conversions(
log=log
)
daysInt, hoursInt, minsInt, secFloat = converter.decimal_day_to_day_hour_min_sec(
daysFloat=24.2453
)
print daysInt, hoursInt, minsInt, secFloat
# OUTPUT: 24, 5, 53, None
daysInt, hoursInt, minsInt, secFloat = converter.decimal_day_to_day_hour_min_sec(
daysFloat=24.1232435454
)
print "%(daysInt)s days, %(hoursInt)s hours, %(minsInt)s mins, %(secFloat)s sec" % locals()
# OUTPUT: 24 days, 2 hours, 57 mins, 28.242 sec
"""
self.log.info(
'starting the ``decimal_day_to_day_hour_min_sec`` method')
daysInt = int(daysFloat)
hoursFloat = (daysFloat - daysInt) * 24.
hoursInt = int(hoursFloat)
minsFloat = (hoursFloat - hoursInt) * 60.
minsInt = int(minsFloat)
secFloat = (minsFloat - minsInt) * 60.
# DETERMINE PRECISION
strday = repr(daysFloat)
if "." not in strday:
precisionUnit = "day"
precision = 0
hoursInt = None
minsInt = None
secFloat = None
else:
lenDec = len(strday.split(".")[-1])
if lenDec < 2:
precisionUnit = "day"
precision = 0
hoursInt = None
minsInt = None
secFloat = None
elif lenDec < 3:
precisionUnit = "hour"
precision = 0
minsInt = None
secFloat = None
elif lenDec < 5:
precisionUnit = "minute"
precision = 0
secFloat = None
else:
precisionUnit = "second"
precision = lenDec - 5
if precision > 3:
precision = 3
secFloat = "%02.*f" % (precision, secFloat)
self.log.info(
'completed the ``decimal_day_to_day_hour_min_sec`` method')
return daysInt, hoursInt, minsInt, secFloat
|
thespacedoctor/astrocalc
|
astrocalc/times/conversions.py
|
conversions.decimal_day_to_day_hour_min_sec
|
python
|
def decimal_day_to_day_hour_min_sec(
self,
daysFloat):
self.log.info(
'starting the ``decimal_day_to_day_hour_min_sec`` method')
daysInt = int(daysFloat)
hoursFloat = (daysFloat - daysInt) * 24.
hoursInt = int(hoursFloat)
minsFloat = (hoursFloat - hoursInt) * 60.
minsInt = int(minsFloat)
secFloat = (minsFloat - minsInt) * 60.
# DETERMINE PRECISION
strday = repr(daysFloat)
if "." not in strday:
precisionUnit = "day"
precision = 0
hoursInt = None
minsInt = None
secFloat = None
else:
lenDec = len(strday.split(".")[-1])
if lenDec < 2:
precisionUnit = "day"
precision = 0
hoursInt = None
minsInt = None
secFloat = None
elif lenDec < 3:
precisionUnit = "hour"
precision = 0
minsInt = None
secFloat = None
elif lenDec < 5:
precisionUnit = "minute"
precision = 0
secFloat = None
else:
precisionUnit = "second"
precision = lenDec - 5
if precision > 3:
precision = 3
secFloat = "%02.*f" % (precision, secFloat)
self.log.info(
'completed the ``decimal_day_to_day_hour_min_sec`` method')
return daysInt, hoursInt, minsInt, secFloat
|
*Convert a day from decimal format to hours mins and sec*
Precision should be respected.
**Key Arguments:**
- ``daysFloat`` -- the day as a decimal.
**Return:**
- ``daysInt`` -- day as an integer
- ``hoursInt`` -- hour as an integer (None if input precsion too low)
- ``minsInt`` -- mins as an integer (None if input precsion too low)
- ``secFloat`` -- secs as a float (None if input precsion too low)
**Usage:**
.. todo::
- replace `decimal_day_to_day_hour_min_sec` in all other code
.. code-block:: python
from astrocalc.times import conversions
converter = conversions(
log=log
)
daysInt, hoursInt, minsInt, secFloat = converter.decimal_day_to_day_hour_min_sec(
daysFloat=24.2453
)
print daysInt, hoursInt, minsInt, secFloat
# OUTPUT: 24, 5, 53, None
daysInt, hoursInt, minsInt, secFloat = converter.decimal_day_to_day_hour_min_sec(
daysFloat=24.1232435454
)
print "%(daysInt)s days, %(hoursInt)s hours, %(minsInt)s mins, %(secFloat)s sec" % locals()
# OUTPUT: 24 days, 2 hours, 57 mins, 28.242 sec
|
train
|
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/times/conversions.py#L278-L363
| null |
class conversions():
"""
*The worker class for the conversions module*
**Key Arguments:**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- add mjd_to_date
- add decimal_day_to_day_hour_min_sec
- add date_to_mjd
- convert all functions in __init__ to modules
.. code-block:: python
usage code
"""
# Initialisation
def __init__(
self,
log,
settings=False,
):
self.log = log
log.debug("instansiating a new 'conversions' object")
self.settings = settings
# xt-self-arg-tmpx
return None
def get(self):
"""
*get the conversions object*
**Return:**
- ``conversions``
.. todo::
- @review: when complete, clean get method
- @review: when complete add logging
"""
self.log.info('starting the ``get`` method')
conversions = None
self.log.info('completed the ``get`` method')
return conversions
def ut_datetime_to_mjd(
self,
utDatetime):
"""*ut datetime to mjd*
If the date given has no time associated with it (e.g. ``20160426``), then the datetime assumed is ``20160426 00:00:00.0``.
Precision should be respected.
**Key Arguments:**
- ``utDatetime`` -- UT datetime. Can accept various formats e.g. ``201604261444``, ``20160426``, ``20160426144444.5452``, ``2016-04-26 14:44:44.234``, ``20160426 14h44m44.432s``
**Return:**
- ``mjd`` -- the MJD
.. todo ::
- replace getMJDFromSqlDate in all code
**Usage:**
.. code-block:: python
from astrocalc.times import conversions
converter = conversions(
log=log
)
mjd = converter.ut_datetime_to_mjd(utDatetime="20160426t1446")
print mjd
# OUT: 57504.6153
mjd = converter.ut_datetime_to_mjd(utDatetime="2016-04-26 14:44:44.234")
print mjd
# OUT: 57504.61440
"""
self.log.info('starting the ``ut_datetime_to_mjd`` method')
import time
import re
mjd = None
# TRIM WHITESPACE FROM AROUND STRING
utDatetime = utDatetime.strip()
# DATETIME REGEX
matchObject = re.match(
r'^(?P<year>\d{4})\D?(?P<month>(0\d|1[0-2]))\D?(?P<day>([0-2]\d|3[0-1])(\.\d+)?)(\D?(?P<hours>([0-1]\d|2[0-3]))\D?(?P<minutes>\d{2})(\D?(?P<seconds>\d{2}(\.\d*?)?))?)?s?$', utDatetime)
# RETURN ERROR IF REGEX NOT MATCHED
if not matchObject:
self.log.error(
'UT Datetime is not in a recognised format. Input value was `%(utDatetime)s`' % locals())
raise IOError(
'UT Datetime is not in a recognised format. Input value was `%(utDatetime)s`' % locals())
year = matchObject.group("year")
month = matchObject.group("month")
day = matchObject.group("day")
hours = matchObject.group("hours")
minutes = matchObject.group("minutes")
seconds = matchObject.group("seconds")
# CLEAN NUMBERS AND SET OUTPUT PRECISION
if "." in day:
fhours = (float(day) - int(float(day))) * 24
hours = int(fhours)
fminutes = (fhours - hours) * 60
minutes = int(fminutes)
seconds = fhours - minutes
precision = len(repr(day).split(".")[-1])
elif not hours:
hours = "00"
minutes = "00"
seconds = "00"
precision = 1
elif not seconds:
seconds = "00"
# PRECISION TO NEAREST MIN i.e. 0.000694444 DAYS
precision = 4
else:
if "." not in seconds:
precision = 5
else:
decLen = len(seconds.split(".")[-1])
precision = 5 + decLen
# CONVERT VALUES TO FLOAT
seconds = float(seconds)
year = float(year)
month = float(month)
day = float(day)
hours = float(hours)
minutes = float(minutes)
# DETERMINE EXTRA TIME (SMALLER THAN A SEC)
extraTime = 0.
if "." in repr(seconds):
extraTime = float("." + repr(seconds).split(".")
[-1]) / (24. * 60. * 60.)
# CONVERT TO UNIXTIME THEN MJD
t = (int(year), int(month), int(day), int(hours),
int(minutes), int(seconds), 0, 0, 0)
unixtime = int(time.mktime(t))
mjd = (unixtime / 86400.0 - 2400000.5 + 2440587.5) + extraTime
mjd = "%0.*f" % (precision, mjd)
self.log.info('completed the ``ut_datetime_to_mjd`` method')
return mjd
def mjd_to_ut_datetime(
self,
mjd,
sqlDate=False,
datetimeObject=False):
"""*mjd to ut datetime*
Precision should be respected.
**Key Arguments:**
- ``mjd`` -- time in MJD.
- ``sqlDate`` -- add a 'T' between date and time instead of space
- ``datetimeObject`` -- return a datetime object instead of a string. Default *False*
.. todo::
- replace getDateFromMJD in all code
- replace getSQLDateFromMJD in all code
**Return:**
- ``utDatetime`` - the UT datetime in string format
**Usage:**
.. code-block:: python
from astrocalc.times import conversions
converter = conversions(
log=log
)
utDate = converter.mjd_to_ut_datetime(
mjd=57504.61577585013
)
print utDate
# OUT: 2016-04-26 14:46:43.033
utDate = converter.mjd_to_ut_datetime(
mjd=57504.61577585013,
sqlDate=True
)
print utDate
# OUT: 2016-04-26T14:46:43.033
"""
self.log.info('starting the ``mjd_to_ut_datetime`` method')
from datetime import datetime
# CONVERT TO UNIXTIME
unixtime = (float(mjd) + 2400000.5 - 2440587.5) * 86400.0
theDate = datetime.utcfromtimestamp(unixtime)
if datetimeObject == False:
# DETERMINE PRECISION
strmjd = repr(mjd)
if "." not in strmjd:
precisionUnit = "day"
precision = 0
utDatetime = theDate.strftime("%Y-%m-%d")
else:
lenDec = len(strmjd.split(".")[-1])
if lenDec < 2:
precisionUnit = "day"
precision = 0
utDatetime = theDate.strftime("%Y-%m-%d")
elif lenDec < 3:
precisionUnit = "hour"
precision = 0
utDatetime = theDate.strftime("%Y-%m-%d")
elif lenDec < 5:
precisionUnit = "minute"
precision = 0
utDatetime = theDate.strftime("%Y-%m-%d %H:%M")
else:
precisionUnit = "second"
precision = lenDec - 5
if precision > 3:
precision = 3
secs = float(theDate.strftime("%S.%f"))
secs = "%02.*f" % (precision, secs)
utDatetime = theDate.strftime("%Y-%m-%d %H:%M:") + secs
if sqlDate:
utDatetime = utDatetime.replace(" ", "T")
else:
utDatetime = theDate
self.log.info('completed the ``mjd_to_ut_datetime`` method')
return utDatetime
|
openp2pdesign/makerlabs
|
makerlabs/makeinitaly_foundation.py
|
get_lab_text
|
python
|
def get_lab_text(lab_slug, language):
if language == "English" or language == "english" or language == "EN" or language == "En":
language = "en"
elif language == "Italian" or language == "italian" or language == "IT" or language == "It" or language == "it":
language = "it"
else:
language = "en"
wiki = MediaWiki(makeinitaly__foundation_api_url)
wiki_response = wiki.call(
{'action': 'query',
'titles': lab_slug + "/" + language,
'prop': 'revisions',
'rvprop': 'content'})
# If we don't know the pageid...
for i in wiki_response["query"]["pages"]:
if "revisions" in wiki_response["query"]["pages"][i]:
content = wiki_response["query"]["pages"][i]["revisions"][0]["*"]
else:
content = ""
# Clean the resulting string/list
newstr01 = content.replace("}}", "")
newstr02 = newstr01.replace("{{", "")
result = newstr02.rstrip("\n|").split("\n|")
return result[0]
|
Gets text description in English or Italian from a single lab from makeinitaly.foundation.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/makeinitaly_foundation.py#L34-L61
| null |
# -*- encoding: utf-8 -*-
#
# Access data from makeinitaly.foundation
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
from classes import Lab
import json
from simplemediawiki import MediaWiki
import pandas as pd
makeinitaly__foundation_api_url = "http://makeinitaly.foundation/wiki/api.php"
class MILab(Lab):
"""Represents a Lab as it is described on makeinitaly.foundation."""
def __init__(self):
self.source = "makeinitaly.foundation"
self.lab_type = "Lab on makeinitaly.foundation"
self.continent = "Europe"
self.country_code = "IT"
self.country = "Italy"
def get_single_lab(lab_slug):
"""Gets data from a single lab from makeinitaly.foundation."""
wiki = MediaWiki(makeinitaly__foundation_api_url)
wiki_response = wiki.call(
{'action': 'query',
'titles': lab_slug,
'prop': 'revisions',
'rvprop': 'content'})
# If we don't know the pageid...
for i in wiki_response["query"]["pages"]:
content = wiki_response["query"]["pages"][i]["revisions"][0]["*"]
# Clean the resulting string/list
newstr01 = content.replace("}}", "")
newstr02 = newstr01.replace("{{", "")
result = newstr02.rstrip("\n|").split("\n|")
# result.remove(u'FabLab')
# Transform the data into a Lab object
current_lab = MILab()
# Add existing data
for i in result:
if "coordinates=" in i:
value = i.replace("coordinates=", "")
current_lab.coordinates = value
latlong = []
if ", " in value:
latlong = value.rstrip(", ").split(", ")
elif " , " in value:
latlong = value.rstrip(" , ").split(" , ")
else:
latlong = ["", ""]
current_lab.latitude = latlong[0]
current_lab.longitude = latlong[1]
elif "province=" in i:
value = i.replace("province=", "")
current_lab.province = value.upper()
elif "region=" in i:
value = i.replace("region=", "")
current_lab.region = value
elif "address=" in i:
value = i.replace("address=", "")
current_lab.address = value
elif "city=" in i:
value = i.replace("city=", "")
current_lab.city = value
elif "fablabsio=" in i:
value = i.replace("fablabsio=", "")
current_lab.fablabsio = value
elif "website=" in i:
value = i.replace("website=", "")
current_lab.website = value
elif "facebook=" in i:
value = i.replace("facebook=", "")
current_lab.facebook = value
elif "twitter=" in i:
value = i.replace("twitter=", "")
current_lab.twitter = value
elif "email=" in i:
value = i.replace("email=", "")
current_lab.email = value
elif "manager=" in i:
value = i.replace("manager=", "")
current_lab.manager = value
elif "birthyear=" in i:
value = i.replace("birthyear=", "")
current_lab.birthyear = value
current_lab.text_en = get_lab_text(lab_slug=lab_slug, language="en")
current_lab.text_it = get_lab_text(lab_slug=lab_slug, language="it")
return current_lab
def get_labs(format):
"""Gets data from all labs from makeinitaly.foundation."""
labs = []
# Get the first page of data
wiki = MediaWiki(makeinitaly__foundation_api_url)
wiki_response = wiki.call(
{'action': 'query',
'list': 'categorymembers',
'cmtitle': 'Category:Italian_FabLabs',
'cmlimit': '500'})
if "query-continue" in wiki_response:
nextpage = wiki_response[
"query-continue"]["categorymembers"]["cmcontinue"]
urls = []
for i in wiki_response["query"]["categorymembers"]:
urls.append(i["title"].replace(" ", "_"))
# Load all the Labs in the first page
for i in urls:
current_lab = get_single_lab(i)
labs.append(current_lab)
# Load all the Labs from the other pages
while "query-continue" in wiki_response:
wiki = MediaWiki(makeinitaly__foundation_api_url)
wiki_response = wiki.call({'action': 'query',
'list': 'categorymembers',
'cmtitle': 'Category:Hackerspace',
'cmlimit': '500',
"cmcontinue": nextpage})
urls = []
for i in wiki_response["query"]["categorymembers"]:
urls.append(i["title"].replace(" ", "_"))
# Load all the Labs
for i in urls:
current_lab = get_single_lab(i, data_format)
labs.append(current_lab)
if "query-continue" in wiki_response:
nextpage = wiki_response[
"query-continue"]["categorymembers"]["cmcontinue"]
else:
break
# Transform the list into a dictionary
labs_dict = {}
for j, k in enumerate(labs):
labs_dict[j] = k.__dict__
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = labs_dict
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in labs_dict:
single = labs_dict[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in labs_dict:
output[j] = labs_dict[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = labs
# Default: return an object
else:
output = labs
# Return a proper json
if format.lower() == "json":
output = json.dumps(labs_dict)
return output
def labs_count():
"""Gets the number of current Labs registered on makeinitaly.foundation."""
labs = get_labs(data_format="dict")
return len(labs)
if __name__ == "__main__":
pass
|
openp2pdesign/makerlabs
|
makerlabs/makeinitaly_foundation.py
|
get_single_lab
|
python
|
def get_single_lab(lab_slug):
wiki = MediaWiki(makeinitaly__foundation_api_url)
wiki_response = wiki.call(
{'action': 'query',
'titles': lab_slug,
'prop': 'revisions',
'rvprop': 'content'})
# If we don't know the pageid...
for i in wiki_response["query"]["pages"]:
content = wiki_response["query"]["pages"][i]["revisions"][0]["*"]
# Clean the resulting string/list
newstr01 = content.replace("}}", "")
newstr02 = newstr01.replace("{{", "")
result = newstr02.rstrip("\n|").split("\n|")
# result.remove(u'FabLab')
# Transform the data into a Lab object
current_lab = MILab()
# Add existing data
for i in result:
if "coordinates=" in i:
value = i.replace("coordinates=", "")
current_lab.coordinates = value
latlong = []
if ", " in value:
latlong = value.rstrip(", ").split(", ")
elif " , " in value:
latlong = value.rstrip(" , ").split(" , ")
else:
latlong = ["", ""]
current_lab.latitude = latlong[0]
current_lab.longitude = latlong[1]
elif "province=" in i:
value = i.replace("province=", "")
current_lab.province = value.upper()
elif "region=" in i:
value = i.replace("region=", "")
current_lab.region = value
elif "address=" in i:
value = i.replace("address=", "")
current_lab.address = value
elif "city=" in i:
value = i.replace("city=", "")
current_lab.city = value
elif "fablabsio=" in i:
value = i.replace("fablabsio=", "")
current_lab.fablabsio = value
elif "website=" in i:
value = i.replace("website=", "")
current_lab.website = value
elif "facebook=" in i:
value = i.replace("facebook=", "")
current_lab.facebook = value
elif "twitter=" in i:
value = i.replace("twitter=", "")
current_lab.twitter = value
elif "email=" in i:
value = i.replace("email=", "")
current_lab.email = value
elif "manager=" in i:
value = i.replace("manager=", "")
current_lab.manager = value
elif "birthyear=" in i:
value = i.replace("birthyear=", "")
current_lab.birthyear = value
current_lab.text_en = get_lab_text(lab_slug=lab_slug, language="en")
current_lab.text_it = get_lab_text(lab_slug=lab_slug, language="it")
return current_lab
|
Gets data from a single lab from makeinitaly.foundation.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/makeinitaly_foundation.py#L64-L137
|
[
"def get_lab_text(lab_slug, language):\n \"\"\"Gets text description in English or Italian from a single lab from makeinitaly.foundation.\"\"\"\n if language == \"English\" or language == \"english\" or language == \"EN\" or language == \"En\":\n language = \"en\"\n elif language == \"Italian\" or language == \"italian\" or language == \"IT\" or language == \"It\" or language == \"it\":\n language = \"it\"\n else:\n language = \"en\"\n wiki = MediaWiki(makeinitaly__foundation_api_url)\n wiki_response = wiki.call(\n {'action': 'query',\n 'titles': lab_slug + \"/\" + language,\n 'prop': 'revisions',\n 'rvprop': 'content'})\n\n # If we don't know the pageid...\n for i in wiki_response[\"query\"][\"pages\"]:\n if \"revisions\" in wiki_response[\"query\"][\"pages\"][i]:\n content = wiki_response[\"query\"][\"pages\"][i][\"revisions\"][0][\"*\"]\n else:\n content = \"\"\n\n # Clean the resulting string/list\n newstr01 = content.replace(\"}}\", \"\")\n newstr02 = newstr01.replace(\"{{\", \"\")\n result = newstr02.rstrip(\"\\n|\").split(\"\\n|\")\n\n return result[0]\n"
] |
# -*- encoding: utf-8 -*-
#
# Access data from makeinitaly.foundation
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
from classes import Lab
import json
from simplemediawiki import MediaWiki
import pandas as pd
makeinitaly__foundation_api_url = "http://makeinitaly.foundation/wiki/api.php"
class MILab(Lab):
"""Represents a Lab as it is described on makeinitaly.foundation."""
def __init__(self):
self.source = "makeinitaly.foundation"
self.lab_type = "Lab on makeinitaly.foundation"
self.continent = "Europe"
self.country_code = "IT"
self.country = "Italy"
def get_lab_text(lab_slug, language):
"""Gets text description in English or Italian from a single lab from makeinitaly.foundation."""
if language == "English" or language == "english" or language == "EN" or language == "En":
language = "en"
elif language == "Italian" or language == "italian" or language == "IT" or language == "It" or language == "it":
language = "it"
else:
language = "en"
wiki = MediaWiki(makeinitaly__foundation_api_url)
wiki_response = wiki.call(
{'action': 'query',
'titles': lab_slug + "/" + language,
'prop': 'revisions',
'rvprop': 'content'})
# If we don't know the pageid...
for i in wiki_response["query"]["pages"]:
if "revisions" in wiki_response["query"]["pages"][i]:
content = wiki_response["query"]["pages"][i]["revisions"][0]["*"]
else:
content = ""
# Clean the resulting string/list
newstr01 = content.replace("}}", "")
newstr02 = newstr01.replace("{{", "")
result = newstr02.rstrip("\n|").split("\n|")
return result[0]
def get_labs(format):
"""Gets data from all labs from makeinitaly.foundation."""
labs = []
# Get the first page of data
wiki = MediaWiki(makeinitaly__foundation_api_url)
wiki_response = wiki.call(
{'action': 'query',
'list': 'categorymembers',
'cmtitle': 'Category:Italian_FabLabs',
'cmlimit': '500'})
if "query-continue" in wiki_response:
nextpage = wiki_response[
"query-continue"]["categorymembers"]["cmcontinue"]
urls = []
for i in wiki_response["query"]["categorymembers"]:
urls.append(i["title"].replace(" ", "_"))
# Load all the Labs in the first page
for i in urls:
current_lab = get_single_lab(i)
labs.append(current_lab)
# Load all the Labs from the other pages
while "query-continue" in wiki_response:
wiki = MediaWiki(makeinitaly__foundation_api_url)
wiki_response = wiki.call({'action': 'query',
'list': 'categorymembers',
'cmtitle': 'Category:Hackerspace',
'cmlimit': '500',
"cmcontinue": nextpage})
urls = []
for i in wiki_response["query"]["categorymembers"]:
urls.append(i["title"].replace(" ", "_"))
# Load all the Labs
for i in urls:
current_lab = get_single_lab(i, data_format)
labs.append(current_lab)
if "query-continue" in wiki_response:
nextpage = wiki_response[
"query-continue"]["categorymembers"]["cmcontinue"]
else:
break
# Transform the list into a dictionary
labs_dict = {}
for j, k in enumerate(labs):
labs_dict[j] = k.__dict__
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = labs_dict
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in labs_dict:
single = labs_dict[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in labs_dict:
output[j] = labs_dict[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = labs
# Default: return an object
else:
output = labs
# Return a proper json
if format.lower() == "json":
output = json.dumps(labs_dict)
return output
def labs_count():
"""Gets the number of current Labs registered on makeinitaly.foundation."""
labs = get_labs(data_format="dict")
return len(labs)
if __name__ == "__main__":
pass
|
openp2pdesign/makerlabs
|
makerlabs/makeinitaly_foundation.py
|
get_labs
|
python
|
def get_labs(format):
labs = []
# Get the first page of data
wiki = MediaWiki(makeinitaly__foundation_api_url)
wiki_response = wiki.call(
{'action': 'query',
'list': 'categorymembers',
'cmtitle': 'Category:Italian_FabLabs',
'cmlimit': '500'})
if "query-continue" in wiki_response:
nextpage = wiki_response[
"query-continue"]["categorymembers"]["cmcontinue"]
urls = []
for i in wiki_response["query"]["categorymembers"]:
urls.append(i["title"].replace(" ", "_"))
# Load all the Labs in the first page
for i in urls:
current_lab = get_single_lab(i)
labs.append(current_lab)
# Load all the Labs from the other pages
while "query-continue" in wiki_response:
wiki = MediaWiki(makeinitaly__foundation_api_url)
wiki_response = wiki.call({'action': 'query',
'list': 'categorymembers',
'cmtitle': 'Category:Hackerspace',
'cmlimit': '500',
"cmcontinue": nextpage})
urls = []
for i in wiki_response["query"]["categorymembers"]:
urls.append(i["title"].replace(" ", "_"))
# Load all the Labs
for i in urls:
current_lab = get_single_lab(i, data_format)
labs.append(current_lab)
if "query-continue" in wiki_response:
nextpage = wiki_response[
"query-continue"]["categorymembers"]["cmcontinue"]
else:
break
# Transform the list into a dictionary
labs_dict = {}
for j, k in enumerate(labs):
labs_dict[j] = k.__dict__
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = labs_dict
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in labs_dict:
single = labs_dict[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in labs_dict:
output[j] = labs_dict[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = labs
# Default: return an object
else:
output = labs
# Return a proper json
if format.lower() == "json":
output = json.dumps(labs_dict)
return output
|
Gets data from all labs from makeinitaly.foundation.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/makeinitaly_foundation.py#L140-L225
|
[
"def get_single_lab(lab_slug):\n \"\"\"Gets data from a single lab from makeinitaly.foundation.\"\"\"\n wiki = MediaWiki(makeinitaly__foundation_api_url)\n wiki_response = wiki.call(\n {'action': 'query',\n 'titles': lab_slug,\n 'prop': 'revisions',\n 'rvprop': 'content'})\n\n # If we don't know the pageid...\n for i in wiki_response[\"query\"][\"pages\"]:\n content = wiki_response[\"query\"][\"pages\"][i][\"revisions\"][0][\"*\"]\n\n # Clean the resulting string/list\n newstr01 = content.replace(\"}}\", \"\")\n newstr02 = newstr01.replace(\"{{\", \"\")\n result = newstr02.rstrip(\"\\n|\").split(\"\\n|\")\n # result.remove(u'FabLab')\n\n # Transform the data into a Lab object\n current_lab = MILab()\n\n # Add existing data\n for i in result:\n if \"coordinates=\" in i:\n value = i.replace(\"coordinates=\", \"\")\n current_lab.coordinates = value\n latlong = []\n if \", \" in value:\n latlong = value.rstrip(\", \").split(\", \")\n elif \" , \" in value:\n latlong = value.rstrip(\" , \").split(\" , \")\n else:\n latlong = [\"\", \"\"]\n current_lab.latitude = latlong[0]\n current_lab.longitude = latlong[1]\n elif \"province=\" in i:\n value = i.replace(\"province=\", \"\")\n current_lab.province = value.upper()\n elif \"region=\" in i:\n value = i.replace(\"region=\", \"\")\n current_lab.region = value\n elif \"address=\" in i:\n value = i.replace(\"address=\", \"\")\n current_lab.address = value\n elif \"city=\" in i:\n value = i.replace(\"city=\", \"\")\n current_lab.city = value\n elif \"fablabsio=\" in i:\n value = i.replace(\"fablabsio=\", \"\")\n current_lab.fablabsio = value\n elif \"website=\" in i:\n value = i.replace(\"website=\", \"\")\n current_lab.website = value\n elif \"facebook=\" in i:\n value = i.replace(\"facebook=\", \"\")\n current_lab.facebook = value\n elif \"twitter=\" in i:\n value = i.replace(\"twitter=\", \"\")\n current_lab.twitter = value\n elif \"email=\" in i:\n value = i.replace(\"email=\", \"\")\n current_lab.email = value\n elif 
\"manager=\" in i:\n value = i.replace(\"manager=\", \"\")\n current_lab.manager = value\n elif \"birthyear=\" in i:\n value = i.replace(\"birthyear=\", \"\")\n current_lab.birthyear = value\n\n current_lab.text_en = get_lab_text(lab_slug=lab_slug, language=\"en\")\n current_lab.text_it = get_lab_text(lab_slug=lab_slug, language=\"it\")\n\n return current_lab\n"
] |
# -*- encoding: utf-8 -*-
#
# Access data from makeinitaly.foundation
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
from classes import Lab
import json
from simplemediawiki import MediaWiki
import pandas as pd
makeinitaly__foundation_api_url = "http://makeinitaly.foundation/wiki/api.php"
class MILab(Lab):
"""Represents a Lab as it is described on makeinitaly.foundation."""
def __init__(self):
self.source = "makeinitaly.foundation"
self.lab_type = "Lab on makeinitaly.foundation"
self.continent = "Europe"
self.country_code = "IT"
self.country = "Italy"
def get_lab_text(lab_slug, language):
"""Gets text description in English or Italian from a single lab from makeinitaly.foundation."""
if language == "English" or language == "english" or language == "EN" or language == "En":
language = "en"
elif language == "Italian" or language == "italian" or language == "IT" or language == "It" or language == "it":
language = "it"
else:
language = "en"
wiki = MediaWiki(makeinitaly__foundation_api_url)
wiki_response = wiki.call(
{'action': 'query',
'titles': lab_slug + "/" + language,
'prop': 'revisions',
'rvprop': 'content'})
# If we don't know the pageid...
for i in wiki_response["query"]["pages"]:
if "revisions" in wiki_response["query"]["pages"][i]:
content = wiki_response["query"]["pages"][i]["revisions"][0]["*"]
else:
content = ""
# Clean the resulting string/list
newstr01 = content.replace("}}", "")
newstr02 = newstr01.replace("{{", "")
result = newstr02.rstrip("\n|").split("\n|")
return result[0]
def get_single_lab(lab_slug):
"""Gets data from a single lab from makeinitaly.foundation."""
wiki = MediaWiki(makeinitaly__foundation_api_url)
wiki_response = wiki.call(
{'action': 'query',
'titles': lab_slug,
'prop': 'revisions',
'rvprop': 'content'})
# If we don't know the pageid...
for i in wiki_response["query"]["pages"]:
content = wiki_response["query"]["pages"][i]["revisions"][0]["*"]
# Clean the resulting string/list
newstr01 = content.replace("}}", "")
newstr02 = newstr01.replace("{{", "")
result = newstr02.rstrip("\n|").split("\n|")
# result.remove(u'FabLab')
# Transform the data into a Lab object
current_lab = MILab()
# Add existing data
for i in result:
if "coordinates=" in i:
value = i.replace("coordinates=", "")
current_lab.coordinates = value
latlong = []
if ", " in value:
latlong = value.rstrip(", ").split(", ")
elif " , " in value:
latlong = value.rstrip(" , ").split(" , ")
else:
latlong = ["", ""]
current_lab.latitude = latlong[0]
current_lab.longitude = latlong[1]
elif "province=" in i:
value = i.replace("province=", "")
current_lab.province = value.upper()
elif "region=" in i:
value = i.replace("region=", "")
current_lab.region = value
elif "address=" in i:
value = i.replace("address=", "")
current_lab.address = value
elif "city=" in i:
value = i.replace("city=", "")
current_lab.city = value
elif "fablabsio=" in i:
value = i.replace("fablabsio=", "")
current_lab.fablabsio = value
elif "website=" in i:
value = i.replace("website=", "")
current_lab.website = value
elif "facebook=" in i:
value = i.replace("facebook=", "")
current_lab.facebook = value
elif "twitter=" in i:
value = i.replace("twitter=", "")
current_lab.twitter = value
elif "email=" in i:
value = i.replace("email=", "")
current_lab.email = value
elif "manager=" in i:
value = i.replace("manager=", "")
current_lab.manager = value
elif "birthyear=" in i:
value = i.replace("birthyear=", "")
current_lab.birthyear = value
current_lab.text_en = get_lab_text(lab_slug=lab_slug, language="en")
current_lab.text_it = get_lab_text(lab_slug=lab_slug, language="it")
return current_lab
def labs_count():
"""Gets the number of current Labs registered on makeinitaly.foundation."""
labs = get_labs(data_format="dict")
return len(labs)
if __name__ == "__main__":
pass
|
openp2pdesign/makerlabs
|
makerlabs/utils.py
|
get_location
|
python
|
def get_location(query, format, api_key):
# Play nice with the API...
sleep(1)
geolocator = OpenCage(api_key=api_key, timeout=10)
# Variables for storing the data
data = {"city": None,
"address_1": None,
"postal_code": None,
"country": None,
"county": None,
"state": None,
"country_code": None,
"latitude": None,
"longitude": None,
"continent": None}
road = ""
number = ""
# Default None values
location_data = {"city": None,
"road": None,
"house_number": None,
"postcode": None,
"country": None,
"county": None,
"state": None,
"ISO_3166-1_alpha-2": None,
"country_code": None,
"lat": None,
"lng": None}
# Reverse geocoding ... from coordinates to address
if format == "reverse":
# If the query (coordinates) is not empty
if query is None or len(query) < 3:
pass
else:
location = geolocator.reverse(query)
if location is not None:
location_data = location[0].raw[u'components']
location_data["lat"] = location[0].raw[u'geometry']["lat"]
location_data["lng"] = location[0].raw[u'geometry']["lng"]
# Direct geocoding ... from address to coordinates and full address
if format == "direct":
# If the query (address) is not empty
if query is None or len(query) < 3:
pass
else:
location = geolocator.geocode(query)
if location is not None:
location_data = location.raw[u'components']
location_data["lat"] = location.raw[u'geometry']["lat"]
location_data["lng"] = location.raw[u'geometry']["lng"]
# Extract the meaningful data
for component in location_data:
if component == "town" or component == "city":
data["city"] = location_data[component]
if component == "road":
road = location_data[component]
if component == "house_number":
number = location_data[component]
if component == "postcode":
data["postal_code"] = location_data[component]
if component == "country":
data["country"] = location_data[component]
if component == "county":
data["county"] = location_data[component]
if component == "state":
data["state"] = location_data[component]
if component == "ISO_3166-1_alpha-2":
data["country_code"] = location_data[component]
# The address need to be reconstructed
data["address_1"] = unicode(road) + " " + unicode(number)
data["latitude"] = location_data["lat"]
data["longitude"] = location_data["lng"]
# Format the country code to three letters
try:
country_data = transformations.cca2_to_ccn(data["country_code"])
data["country_code"] = transformations.ccn_to_cca3(country_data)
except:
data["country_code"] = None
# Get the continent
try:
country_data = transformations.cc_to_cn(data["country_code"])
data["continent"] = transformations.cn_to_ctn(country_data)
except:
data["continent"] = None
# Return the final data
return data
|
Get geographic data of a lab in a coherent way for all labs.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/utils.py#L18-L110
| null |
# -*- encoding: utf-8 -*-
#
# Fuctions for the other modules
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
# Import necessary modules
from geopy.geocoders import OpenCage
from time import sleep
from incf.countryutils import transformations
if __name__ == "__main__":
pass
|
openp2pdesign/makerlabs
|
makerlabs/repaircafe_org.py
|
data_from_repaircafe_org
|
python
|
def data_from_repaircafe_org():
# Use Chrome as a browser
browser = webdriver.Chrome()
# Use PhantomJS as a browser
# browser = webdriver.PhantomJS('phantomjs')
browser.get("https://repaircafe.org/en/?s=Contact+the+local+organisers")
browser.maximize_window()
# Iterate over results (the #viewmore_link button)
viewmore_button = True
while viewmore_button:
try:
viewmore = browser.find_element_by_id("viewmore_link")
# Scroll to the link in order to make it visible
browser.execute_script("arguments[0].scrollIntoView();", viewmore)
# Keep searching
viewmore.click()
except:
# If there's an error, we have reached the end of the search
viewmore_button = False
# Give a bit of time for loading the search results
sleep(2)
# Load the source code
page_source = BeautifulSoup(browser.page_source, "lxml")
# Close the browser
browser.quit()
# Parse the source code in order to find all the links under H4s
data = []
for h4 in page_source.find_all("h4"):
for a in h4.find_all('a', href=True):
data.append({"name": a.contents[0], "url": a['href']})
return data
|
Gets data from repaircafe_org.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/repaircafe_org.py#L44-L79
| null |
# -*- encoding: utf-8 -*-
#
# Access data from repaircafe.org
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
from classes import Lab
import json
from geojson import dumps, Feature, Point, FeatureCollection
from geopy.geocoders import Nominatim
import pycountry
from pycountry_convert import country_alpha2_to_continent_code
from time import sleep
from selenium import webdriver
from bs4 import BeautifulSoup
import requests
import pandas as pd
from utils import get_location
# Geocoding variable
geolocator = Nominatim()
# Endpoints
fablabs_io_labs_api_url_v0 = "https://api.fablabs.io/v0/labs.json"
fablabs_io_projects_api_url_v0 = "https://api.fablabs.io/v0/projects.json"
class RepairCafe(Lab):
    """A single Repair Cafe, as listed on repaircafe.org."""

    def __init__(self):
        # Tag every instance with its lab category and data source.
        self.lab_type = "Repair Cafe"
        self.source = "repaircafe.org"
def get_labs(format):
"""Gets Repair Cafe data from repairecafe.org."""
data = data_from_repaircafe_org()
repaircafes = {}
# Load all the Repair Cafes
for i in data:
# Create a lab
current_lab = RepairCafe()
# Add existing data from first scraping
current_lab.name = i["name"]
slug = i["url"].replace("https://repaircafe.org/locations/", "")
if slug.endswith("/"):
slug.replace("/", "")
current_lab.slug = slug
current_lab.url = i["url"]
# Scrape for more data
page_request = requests.get(i["url"])
if page_request.status_code == 200:
page_source = BeautifulSoup(page_request.text, "lxml")
else:
output = "There was an error while accessing data on repaircafe.org."
# Find Facebook and Twitter links, add also the other ones
current_lab.links = {"facebook": "", "twitter": ""}
column = page_source.find_all("div", class_="sc_column_item_2")
for j in column:
for p in j.find_all('p'):
for a in p.find_all('a', href=True):
if "facebook" in a['href']:
current_lab.links["facebook"] = a['href']
elif "twitter" in a['href']:
current_lab.links["twitter"] = a['href']
else:
current_lab.links[a['href']] = a['href']
# Find address
column = page_source.find_all("div", class_="sc_column_item_1")
for x in column:
if x.string:
print x.string.strip()
exit()
# current_lab.address_1 = i["address_1"]
# current_lab.address_2 = i["address_2"]
# current_lab.address_notes = i["address_notes"]
# current_lab.blurb = i["blurb"]
# current_lab.city = i["city"]
# current_lab.country_code = i["country_code"]
# current_lab.county = i["county"]
# current_lab.description = i["description"]
# current_lab.email = i["email"]
# current_lab.id = i["id"]
# current_lab.phone = i["phone"]
# current_lab.postal_code = i["postal_code"]
#
#
# current_lab.continent = country_alpha2_to_continent_code(i[
# "country_code"].upper())
# current_country = pycountry.countries.get(
# alpha_2=i["country_code"].upper())
# current_lab.country_code = current_country.alpha_3
# current_lab.country = current_country.name
# if i["longitude"] is None or i["latitude"] is None:
# # Be nice with the geocoder API limit
# errorsb += 1
# # sleep(10)
# # location = geolocator.geocode(
# # {"city": i["city"],
# # "country": i["country_code"].upper()},
# # addressdetails=True,
# # language="en")
# # if location is not None:
# # current_lab.latitude = location.latitude
# # current_lab.longitude = location.longitude
# # if "county" in location.raw["address"]:
# # current_lab.county = location.raw["address"][
# # "county"].encode('utf-8')
# # if "state" in location.raw["address"]:
# # current_lab.state = location.raw["address"][
# # "state"].encode('utf-8')
# else:
# # Be nice with the geocoder API limit
# sleep(10)
# errorsa += 1
# # location = geolocator.reverse((i["latitude"], i["longitude"]))
# # if location is not None:
# # if "county" in location.raw["address"]:
# # current_lab.county = location.raw["address"][
# # "county"].encode('utf-8')
# # if "state" in location.raw["address"]:
# # current_lab.state = location.raw["address"][
# # "state"].encode('utf-8')
# Add the lab to the list
repaircafes[slug] = current_lab
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in repaircafes:
output[j] = repaircafes[j].__dict__
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in repaircafes:
single = repaircafes[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in repaircafes:
output[j] = repaircafes[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = repaircafes
# Default: return an oject
else:
output = repaircafes
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output
def labs_count():
    """Gets the number of current Repair Cafes registered on repaircafe.org."""
    # data_from_repaircafe_org() returns a plain list of labs, so its
    # length is the count; the original indexed it with ["labs"], which
    # raises a TypeError on a list.
    repaircafes = data_from_repaircafe_org()
    return len(repaircafes)
if __name__ == "__main__":
print get_labs(format="json")
|
openp2pdesign/makerlabs
|
makerlabs/repaircafe_org.py
|
get_labs
|
python
|
def get_labs(format):
data = data_from_repaircafe_org()
repaircafes = {}
# Load all the Repair Cafes
for i in data:
# Create a lab
current_lab = RepairCafe()
# Add existing data from first scraping
current_lab.name = i["name"]
slug = i["url"].replace("https://repaircafe.org/locations/", "")
if slug.endswith("/"):
slug.replace("/", "")
current_lab.slug = slug
current_lab.url = i["url"]
# Scrape for more data
page_request = requests.get(i["url"])
if page_request.status_code == 200:
page_source = BeautifulSoup(page_request.text, "lxml")
else:
output = "There was an error while accessing data on repaircafe.org."
# Find Facebook and Twitter links, add also the other ones
current_lab.links = {"facebook": "", "twitter": ""}
column = page_source.find_all("div", class_="sc_column_item_2")
for j in column:
for p in j.find_all('p'):
for a in p.find_all('a', href=True):
if "facebook" in a['href']:
current_lab.links["facebook"] = a['href']
elif "twitter" in a['href']:
current_lab.links["twitter"] = a['href']
else:
current_lab.links[a['href']] = a['href']
# Find address
column = page_source.find_all("div", class_="sc_column_item_1")
for x in column:
if x.string:
print x.string.strip()
exit()
# current_lab.address_1 = i["address_1"]
# current_lab.address_2 = i["address_2"]
# current_lab.address_notes = i["address_notes"]
# current_lab.blurb = i["blurb"]
# current_lab.city = i["city"]
# current_lab.country_code = i["country_code"]
# current_lab.county = i["county"]
# current_lab.description = i["description"]
# current_lab.email = i["email"]
# current_lab.id = i["id"]
# current_lab.phone = i["phone"]
# current_lab.postal_code = i["postal_code"]
#
#
# current_lab.continent = country_alpha2_to_continent_code(i[
# "country_code"].upper())
# current_country = pycountry.countries.get(
# alpha_2=i["country_code"].upper())
# current_lab.country_code = current_country.alpha_3
# current_lab.country = current_country.name
# if i["longitude"] is None or i["latitude"] is None:
# # Be nice with the geocoder API limit
# errorsb += 1
# # sleep(10)
# # location = geolocator.geocode(
# # {"city": i["city"],
# # "country": i["country_code"].upper()},
# # addressdetails=True,
# # language="en")
# # if location is not None:
# # current_lab.latitude = location.latitude
# # current_lab.longitude = location.longitude
# # if "county" in location.raw["address"]:
# # current_lab.county = location.raw["address"][
# # "county"].encode('utf-8')
# # if "state" in location.raw["address"]:
# # current_lab.state = location.raw["address"][
# # "state"].encode('utf-8')
# else:
# # Be nice with the geocoder API limit
# sleep(10)
# errorsa += 1
# # location = geolocator.reverse((i["latitude"], i["longitude"]))
# # if location is not None:
# # if "county" in location.raw["address"]:
# # current_lab.county = location.raw["address"][
# # "county"].encode('utf-8')
# # if "state" in location.raw["address"]:
# # current_lab.state = location.raw["address"][
# # "state"].encode('utf-8')
# Add the lab to the list
repaircafes[slug] = current_lab
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in repaircafes:
output[j] = repaircafes[j].__dict__
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in repaircafes:
single = repaircafes[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in repaircafes:
output[j] = repaircafes[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = repaircafes
# Default: return an oject
else:
output = repaircafes
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output
|
Gets Repair Cafe data from repaircafe.org.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/repaircafe_org.py#L82-L214
|
[
"def data_from_repaircafe_org():\n \"\"\"Gets data from repaircafe_org.\"\"\"\n\n # Use Chrome as a browser\n browser = webdriver.Chrome()\n # Use PhantomJS as a browser\n # browser = webdriver.PhantomJS('phantomjs')\n browser.get(\"https://repaircafe.org/en/?s=Contact+the+local+organisers\")\n browser.maximize_window()\n\n # Iterate over results (the #viewmore_link button)\n viewmore_button = True\n while viewmore_button:\n try:\n viewmore = browser.find_element_by_id(\"viewmore_link\")\n # Scroll to the link in order to make it visible\n browser.execute_script(\"arguments[0].scrollIntoView();\", viewmore)\n # Keep searching\n viewmore.click()\n except:\n # If there's an error, we have reached the end of the search\n viewmore_button = False\n # Give a bit of time for loading the search results\n sleep(2)\n\n # Load the source code\n page_source = BeautifulSoup(browser.page_source, \"lxml\")\n # Close the browser\n browser.quit()\n # Parse the source code in order to find all the links under H4s\n data = []\n for h4 in page_source.find_all(\"h4\"):\n for a in h4.find_all('a', href=True):\n data.append({\"name\": a.contents[0], \"url\": a['href']})\n\n return data\n"
] |
# -*- encoding: utf-8 -*-
#
# Access data from repaircafe.org
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
from classes import Lab
import json
from geojson import dumps, Feature, Point, FeatureCollection
from geopy.geocoders import Nominatim
import pycountry
from pycountry_convert import country_alpha2_to_continent_code
from time import sleep
from selenium import webdriver
from bs4 import BeautifulSoup
import requests
import pandas as pd
from utils import get_location
# Geocoding variable
geolocator = Nominatim()
# Endpoints
fablabs_io_labs_api_url_v0 = "https://api.fablabs.io/v0/labs.json"
fablabs_io_projects_api_url_v0 = "https://api.fablabs.io/v0/projects.json"
class RepairCafe(Lab):
    """A single Repair Cafe, as listed on repaircafe.org."""

    def __init__(self):
        # Tag every instance with its lab category and data source.
        self.lab_type = "Repair Cafe"
        self.source = "repaircafe.org"
def data_from_repaircafe_org():
    """Scrape repaircafe.org search results and return basic lab data.

    Drives a real browser through the paginated search results (clicking
    the "view more" button until it disappears), then parses every
    ``<h4>`` link into a small dict.

    Returns:
        list of dict: one entry per Repair Cafe found, with ``name`` and
        ``url`` keys.
    """
    # Use Chrome as a browser
    browser = webdriver.Chrome()
    # Use PhantomJS as a browser
    # browser = webdriver.PhantomJS('phantomjs')
    browser.get("https://repaircafe.org/en/?s=Contact+the+local+organisers")
    browser.maximize_window()

    # Iterate over results (the #viewmore_link button)
    viewmore_button = True
    while viewmore_button:
        try:
            viewmore = browser.find_element_by_id("viewmore_link")
            # Scroll to the link in order to make it visible
            browser.execute_script("arguments[0].scrollIntoView();", viewmore)
            # Keep searching
            viewmore.click()
        except Exception:
            # Catch only real errors: a bare ``except:`` would also
            # swallow KeyboardInterrupt/SystemExit. No button left means
            # we have reached the end of the search results.
            viewmore_button = False
        # Give a bit of time for loading the search results
        sleep(2)

    # Load the source code
    page_source = BeautifulSoup(browser.page_source, "lxml")
    # Close the browser
    browser.quit()
    # Parse the source code in order to find all the links under H4s
    data = []
    for h4 in page_source.find_all("h4"):
        for a in h4.find_all('a', href=True):
            data.append({"name": a.contents[0], "url": a['href']})

    return data
def labs_count():
    """Gets the number of current Repair Cafes registered on repaircafe.org."""
    # data_from_repaircafe_org() returns a plain list of labs, so its
    # length is the count; the original indexed it with ["labs"], which
    # raises a TypeError on a list.
    repaircafes = data_from_repaircafe_org()
    return len(repaircafes)
if __name__ == "__main__":
print get_labs(format="json")
|
openp2pdesign/makerlabs
|
makerlabs/timeline.py
|
get_multiple_data
|
python
|
def get_multiple_data():
    """Get data from all the platforms listed in makerlabs."""
    # Query every mapped platform once and collect the results under a
    # key named after the source module.
    all_labs = {
        "diybio_org": diybio_org.get_labs(format="dict"),
        "fablabs_io": fablabs_io.get_labs(format="dict"),
        "makeinitaly_foundation": makeinitaly_foundation.get_labs(
            format="dict"),
        "hackaday_io": hackaday_io.get_labs(format="dict"),
        "hackerspaces_org": hackerspaces_org.get_labs(format="dict"),
        "makery_info": makery_info.get_labs(format="dict"),
        "nesta": nesta.get_labs(format="dict"),
    }
    # all_labs["techshop_ws"] = techshop_ws.get_labs(format="dict")
    return all_labs
|
Get data from all the platforms listed in makerlabs.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/timeline.py#L24-L39
|
[
"def get_labs(format):\n \"\"\"Gets current UK Makerspaces data as listed by NESTA.\"\"\"\n\n ukmakerspaces_data = data_from_nesta()\n ukmakerspaces = {}\n\n # Iterate over csv rows\n for index, row in ukmakerspaces_data.iterrows():\n current_lab = UKMakerspace()\n current_lab.address_1 = row[\"Address\"].replace(\"\\r\", \" \")\n current_lab.address_2 = row[\"Region\"].replace(\"\\r\", \" \") + \" - \" + row[\"Area\"].replace(\"\\r\", \" \")\n current_lab.city = \"\"\n current_lab.county = \"\"\n current_lab.email = row[\"Email address\"]\n current_lab.latitude = \"\"\n current_lab.longitude = \"\"\n current_lab.links = \"\"\n current_lab.name = row[\"Name of makerspace\"]\n current_lab.phone = row[\"Phone number\"]\n current_lab.postal_code = row[\"Postcode\"]\n current_lab.url = row[\"Website / URL\"]\n\n # Add the lab, with a slug from the name\n ukmakerspaces[current_lab.name] = current_lab\n\n # Return a dictiornary / json\n if format.lower() == \"dict\" or format.lower() == \"json\":\n output = {}\n for j in ukmakerspaces:\n output[j] = ukmakerspaces[j].__dict__\n # Return a geojson\n elif format.lower() == \"geojson\" or format.lower() == \"geo\":\n labs_list = []\n for l in ukmakerspaces:\n single = ukmakerspaces[l].__dict__\n single_lab = Feature(\n type=\"Feature\",\n geometry=Point((single[\"latitude\"], single[\"longitude\"])),\n properties=single)\n labs_list.append(single_lab)\n output = dumps(FeatureCollection(labs_list))\n # Return a Pandas DataFrame\n elif format.lower() == \"pandas\" or format.lower() == \"dataframe\":\n output = {}\n for j in ukmakerspaces:\n output[j] = ukmakerspaces[j].__dict__\n # Transform the dict into a Pandas DataFrame\n output = pd.DataFrame.from_dict(output)\n output = output.transpose()\n # Return an object\n elif format.lower() == \"object\" or format.lower() == \"obj\":\n output = ukmakerspaces\n # Default: return an oject\n else:\n output = ukmakerspaces\n # Return a proper json\n if format.lower() == \"json\":\n 
output = json.dumps(output)\n return output\n",
"def get_labs(format, open_cage_api_key):\n \"\"\"Gets DIYBio Lab data from diybio.org.\"\"\"\n\n diybiolabs_soup = data_from_diybio_org()\n diybiolabs = {}\n\n rows_list = []\n continents_dict = {}\n continents_order = 0\n ranges_starting_points = []\n\n # Load all the DIYBio Labs\n # By first parsing the html\n\n # Parse table rows\n for row in diybiolabs_soup.select(\"table tr\"):\n cells = row.find_all('td')\n rows_list.append(cells)\n\n # Find the continents in order to iterate over their children td\n for k, row in enumerate(rows_list):\n for col in row:\n if col.find('h3'):\n for h3 in col.findAll('h3'):\n ranges_starting_points.append(k)\n continents_dict[continents_order] = h3.get_text()\n continents_order += 1\n\n # Find the rows of each continent\n ranges = {}\n for k, j in enumerate(reversed(ranges_starting_points)):\n if k < len(ranges_starting_points) - 1:\n ranges[k] = {\"start\": ranges_starting_points[k],\n \"end\": ranges_starting_points[k + 1]}\n else:\n # The last continent, Oceania\n ranges[k] = {\"start\": ranges_starting_points[k],\n \"end\": len(rows_list)}\n\n # Iterate over the range of each continent to find the Labs\n for i in ranges:\n # The +1 just avoids the H3 line\n for j in range(ranges[i][\"start\"] + 1, ranges[i][\"end\"]):\n # Avoid empty rows by measuring the lenght of the content of each cell and with a boolean check\n rules = [len(n) == 0 for n in rows_list[j]]\n if False in rules:\n current_lab = DiyBioLab()\n current_lab.city = rows_list[j][1].contents[0].encode('utf-8')\n # Data from the USA is not really well formatted\n if continents_dict[i] == \"USA-EAST\" or continents_dict[\n i] == \"USA-WEST\":\n current_lab.state = rows_list[j][2].contents[0].replace(\n \" \", \"\").encode('utf-8')\n else:\n current_lab.country_code = rows_list[j][2].contents[\n 0].encode('utf-8')\n current_lab.url = rows_list[j][3].contents[0].attrs['href']\n # Each lab is identified by the simplified url\n slug = current_lab.url\n if \"http://\" 
in slug:\n slug = slug.replace(\"http://\", \"\")\n elif \"https://\" in slug:\n slug = slug.replace(\"https://\", \"\")\n if \"www.\" in slug:\n slug = slug.replace(\"www.\", \"\")\n current_lab.name = slug\n current_lab.slug = slug\n\n # Data from the USA is not really well formatted\n if continents_dict[i] == \"USA-EAST\" or continents_dict[i] == \"USA-WEST\":\n current_lab.continent = \"North America\"\n current_lab.country_code = \"USA\"\n current_lab.country = \"United States of America\"\n current_lab.state = us.states.lookup(\n current_lab.state).name\n\n # Get address from city\n address = get_location(query=current_lab.city, format=\"direct\", api_key=open_cage_api_key)\n current_lab.continent = address[\"continent\"]\n current_lab.latitude = address[\"latitude\"]\n current_lab.longitude = address[\"longitude\"]\n current_lab.address_1 = address[\"address_1\"]\n current_lab.country = address[\"country\"]\n current_lab.country_code = address[\"country_code\"]\n current_lab.latitude = address[\"latitude\"]\n current_lab.longitude = address[\"longitude\"]\n current_lab.county = address[\"county\"]\n current_lab.postal_code = address[\"postal_code\"]\n current_lab.state = address[\"state\"]\n\n # Add the lab to the list\n diybiolabs[slug] = current_lab\n del current_lab\n\n # Return a dictionary / json\n if format.lower() == \"dict\":\n output = {}\n for j in diybiolabs:\n output[j] = diybiolabs[j].__dict__\n # Return a geojson\n elif format.lower() == \"geojson\" or format.lower() == \"geo\":\n labs_list = []\n for l in diybiolabs:\n single = diybiolabs[l].__dict__\n single_lab = Feature(\n type=\"Feature\",\n geometry=Point((single[\"latitude\"], single[\"longitude\"])),\n properties=single)\n labs_list.append(single_lab)\n output = dumps(FeatureCollection(labs_list))\n # Return a Pandas DataFrame\n elif format.lower() == \"pandas\" or format.lower() == \"dataframe\":\n output = {}\n for j in diybiolabs:\n output[j] = diybiolabs[j].__dict__\n # Transform 
the dict into a Pandas DataFrame\n output = pd.DataFrame.from_dict(output)\n # Put labs names as the index, to make it coherent with other APIs\n output = output.transpose()\n # Return an object\n elif format.lower() == \"object\" or format.lower() == \"obj\":\n output = diybiolabs\n # Default: return an oject\n else:\n output = diybiolabs\n # Return a proper json\n if format.lower() == \"json\":\n output = json.dumps(diybiolabs)\n return output\n",
"def get_labs(format):\n \"\"\"Gets Lab data from makery.info.\"\"\"\n\n labs_json = data_from_makery_info(makery_info_labs_api_url)\n labs = {}\n\n # Load all the FabLabs\n for i in labs_json[\"labs\"]:\n current_lab = MakeryLab()\n current_lab.address_1 = i[\"address_1\"]\n current_lab.address_2 = i[\"address_2\"]\n current_lab.address_notes = i[\"address_notes\"]\n current_lab.avatar = i[\"avatar\"]\n current_lab.blurb = i[\"blurb\"]\n current_lab.capabilities = i[\"capabilities\"]\n current_lab.city = i[\"city\"]\n current_lab.country_code = i[\"country_code\"]\n current_lab.county = i[\"county\"]\n current_lab.description = i[\"description\"]\n current_lab.email = i[\"email\"]\n current_lab.header_image_src = i[\"header_image_src\"]\n current_lab.id = i[\"id\"]\n current_lab.kind_name = i[\"kind_name\"]\n # Some labs do not have coordinates\n if i[\"latitude\"] is None or i[\"longitude\"] is None:\n address = i[\"address_1\"] + i[\"city\"] + i[\"country_code\"]\n try:\n location = geolocator.geocode(address)\n current_lab.latitude = location.latitude\n current_lab.longitude = location.longitude\n except:\n try:\n location = geolocator.geocode(i[\"city\"])\n current_lab.latitude = location.latitude\n current_lab.longitude = location.longitude\n except:\n # For labs without a city, add 0,0 as coordinates\n current_lab.latitude = 0.0\n current_lab.longitude = 0.0\n else:\n current_lab.latitude = i[\"latitude\"]\n current_lab.longitude = i[\"longitude\"]\n current_lab.links = i[\"links\"]\n current_lab.name = i[\"name\"]\n current_lab.parent_id = i[\"parent_id\"]\n current_lab.phone = i[\"phone\"]\n current_lab.postal_code = i[\"postal_code\"]\n current_lab.slug = i[\"slug\"]\n current_lab.url = i[\"url\"]\n # Add the lab\n labs[i[\"slug\"]] = current_lab\n\n # Return a dictiornary / json\n if format.lower() == \"dict\" or format.lower() == \"json\":\n output = {}\n for j in labs:\n output[j] = labs[j].__dict__\n # Return a geojson\n elif format.lower() == 
\"geojson\" or format.lower() == \"geo\":\n labs_list = []\n for l in labs:\n single = labs[l].__dict__\n single_lab = Feature(\n type=\"Feature\",\n geometry=Point((single[\"latitude\"], single[\"longitude\"])),\n properties=single)\n labs_list.append(single_lab)\n output = dumps(FeatureCollection(labs_list))\n # Return a Pandas DataFrame\n elif format.lower() == \"pandas\" or format.lower() == \"dataframe\":\n output = {}\n for j in labs_list:\n output[j] = labs_list[j].__dict__\n # Transform the dict into a Pandas DataFrame\n output = pd.DataFrame.from_dict(output)\n output = output.transpose()\n # Return an object\n elif format.lower() == \"object\" or format.lower() == \"obj\":\n output = labs\n # Default: return an oject\n else:\n output = labs\n # Return a proper json\n if format.lower() == \"json\":\n output = json.dumps(output)\n return output\n",
"def get_labs(format):\n \"\"\"Gets Hackerspaces data from hackaday.io.\"\"\"\n\n hackerspaces_json = data_from_hackaday_io(hackaday_io_labs_map_url)\n hackerspaces = {}\n\n # Load all the Hackerspaces\n for i in hackerspaces_json:\n current_lab = Hackerspace()\n current_lab.id = i[\"id\"]\n current_lab.url = \"https://hackaday.io/hackerspace/\" + current_lab.id\n current_lab.name = i[\"name\"]\n if len(i[\"description\"]) != 0:\n current_lab.description = i[\"description\"]\n elif len(i[\"summary\"]) != 0:\n current_lab.description = i[\"summary\"]\n current_lab.created_at = i[\"moments\"][\"exact\"]\n\n # Check if there are coordinates\n if i[\"latlon\"] is not None:\n latlon = json.loads(i[\"latlon\"])\n current_lab.latitude = latlon[\"lat\"]\n current_lab.longitude = latlon[\"lng\"]\n # Get country, county and city from them\n country = geolocator.reverse(\n [latlon[\"lat\"], latlon[\"lng\"]])\n current_lab.country = country.raw[\n \"address\"][\"country\"]\n current_lab.address = country.raw[\"display_name\"]\n current_lab.address_1 = country.raw[\"display_name\"]\n current_lab.country_code = country.raw[\n \"address\"][\"country_code\"]\n current_lab.county = country.raw[\n \"address\"][\"state_district\"]\n current_lab.city = country.raw[\n \"address\"][\"city\"]\n current_lab.postal_code = country.raw[\n \"address\"][\"postcode\"]\n else:\n # For labs without a location or coordinates\n # add 0,0 as coordinates\n current_lab.latitude = 0.0\n current_lab.longitude = 0.0\n\n # Add the lab\n hackerspaces[i[\"name\"]] = current_lab\n\n # Return a dictiornary / json\n if format.lower() == \"dict\" or format.lower() == \"json\":\n output = {}\n for j in hackerspaces:\n output[j] = hackerspaces[j].__dict__\n # Return a geojson\n elif format.lower() == \"geojson\" or format.lower() == \"geo\":\n labs_list = []\n for l in hackerspaces:\n single = hackerspaces[l].__dict__\n single_lab = Feature(\n type=\"Feature\",\n geometry=Point((single[\"latitude\"], 
single[\"longitude\"])),\n properties=single)\n labs_list.append(single_lab)\n output = dumps(FeatureCollection(labs_list))\n # Return a Pandas DataFrame\n elif format.lower() == \"pandas\" or format.lower() == \"dataframe\":\n output = {}\n for j in hackerspaces:\n output[j] = hackerspaces[j].__dict__\n # Transform the dict into a Pandas DataFrame\n output = pd.DataFrame.from_dict(output)\n output = output.transpose()\n # Return an object\n elif format.lower() == \"object\" or format.lower() == \"obj\":\n output = hackerspaces\n # Default: return an oject\n else:\n output = hackerspaces\n # Return a proper json\n if format.lower() == \"json\":\n output = json.dumps(output)\n return output\n",
"def get_labs(format, open_cage_api_key):\n \"\"\"Gets data from all labs from hackerspaces.org.\"\"\"\n\n labs = []\n\n # Get the first page of data\n wiki = MediaWiki(hackerspaces_org_api_url)\n wiki_response = wiki.call(\n {'action': 'query',\n 'list': 'categorymembers',\n 'cmtitle': 'Category:Hackerspace',\n 'cmlimit': '500'})\n nextpage = wiki_response[\"query-continue\"][\"categorymembers\"][\"cmcontinue\"]\n\n urls = []\n for i in wiki_response[\"query\"][\"categorymembers\"]:\n urls.append(i[\"title\"].replace(\" \", \"_\"))\n\n # Load all the Labs in the first page\n for i in urls:\n current_lab = get_single_lab(i, open_cage_api_key)\n labs.append(current_lab)\n\n # Load all the Labs from the other pages\n while \"query-continue\" in wiki_response:\n wiki = MediaWiki(hackerspaces_org_api_url)\n wiki_response = wiki.call({'action': 'query',\n 'list': 'categorymembers',\n 'cmtitle': 'Category:Hackerspace',\n 'cmlimit': '500',\n \"cmcontinue\": nextpage})\n\n urls = []\n for i in wiki_response[\"query\"][\"categorymembers\"]:\n urls.append(i[\"title\"].replace(\" \", \"_\"))\n\n # Load all the Labs\n for i in urls:\n current_lab = get_single_lab(i, open_cage_api_key)\n labs.append(current_lab)\n\n if \"query-continue\" in wiki_response:\n nextpage = wiki_response[\n \"query-continue\"][\"categorymembers\"][\"cmcontinue\"]\n else:\n break\n\n # Transform the list into a dictionary\n labs_dict = {}\n for j, k in enumerate(labs):\n labs_dict[j] = k.__dict__\n\n # Return a dictiornary / json\n if format.lower() == \"dict\" or format.lower() == \"json\":\n output = labs_dict\n # Return a geojson\n elif format.lower() == \"geojson\" or format.lower() == \"geo\":\n labs_list = []\n for l in labs_dict:\n single = labs_dict[l].__dict__\n single_lab = Feature(\n type=\"Feature\",\n geometry=Point((single[\"latitude\"], single[\"longitude\"])),\n properties=single)\n labs_list.append(single_lab)\n output = dumps(FeatureCollection(labs_list))\n # Return a Pandas 
DataFrame\n elif format.lower() == \"pandas\" or format.lower() == \"dataframe\":\n output = labs_dict\n # Transform the dict into a Pandas DataFrame\n output = pd.DataFrame.from_dict(output)\n output = output.transpose()\n output = output.set_index(['name'])\n # Return an object\n elif format.lower() == \"object\" or format.lower() == \"obj\":\n output = labs\n # Default: return an object\n else:\n output = labs\n # Return a proper json\n if format.lower() == \"json\":\n output = json.dumps(labs_dict)\n return output\n",
"def get_labs(format):\n \"\"\"Gets Fab Lab data from fablabs.io.\"\"\"\n\n fablabs_json = data_from_fablabs_io(fablabs_io_labs_api_url_v0)\n fablabs = {}\n\n # Load all the FabLabs\n for i in fablabs_json[\"labs\"]:\n current_lab = FabLab()\n current_lab.name = i[\"name\"]\n current_lab.address_1 = i[\"address_1\"]\n current_lab.address_2 = i[\"address_2\"]\n current_lab.address_notes = i[\"address_notes\"]\n current_lab.avatar = i[\"avatar_url\"]\n current_lab.blurb = i[\"blurb\"]\n current_lab.capabilities = i[\"capabilities\"]\n if i[\"city\"].isupper():\n i[\"city\"] = i[\"city\"].title()\n current_lab.city = i[\"city\"]\n current_lab.country_code = i[\"country_code\"]\n current_lab.county = i[\"county\"]\n current_lab.description = i[\"description\"]\n current_lab.email = i[\"email\"]\n current_lab.id = i[\"id\"]\n current_lab.phone = i[\"phone\"]\n current_lab.postal_code = i[\"postal_code\"]\n current_lab.slug = i[\"slug\"]\n current_lab.url = i[\"url\"]\n\n current_lab.continent = country_alpha2_to_continent_code(i[\"country_code\"].upper())\n current_country = pycountry.countries.get(alpha_2=i[\"country_code\"].upper())\n current_lab.country_code = current_country.alpha_3\n current_lab.country = current_country.name\n\n # Check coordinates\n if i[\"longitude\"] is not None:\n current_lab.longitude = i[\"longitude\"]\n else:\n current_lab.longitude = 0.0\n if i[\"latitude\"] is not None:\n current_lab.latitude = i[\"latitude\"]\n else:\n current_lab.latitude = 0.0\n\n # Find Facebook and Twitter links, add also the other ones\n current_lab.links = {\"facebook\": \"\", \"twitter\": \"\"}\n for link in i[\"links\"]:\n if \"facebook\" in link[\"url\"]:\n current_lab.links[\"facebook\"] = link[\"url\"]\n elif \"twitter\" in link[\"url\"]:\n current_lab.links[\"twitter\"] = link[\"url\"]\n else:\n current_lab.links[link[\"id\"]] = link[\"url\"]\n\n # Add the lab to the list\n fablabs[i[\"slug\"]] = current_lab\n\n # Return a dictiornary / json\n if 
format.lower() == \"dict\" or format.lower() == \"json\":\n output = {}\n for j in fablabs:\n output[j] = fablabs[j].__dict__\n # Return a geojson\n elif format.lower() == \"geojson\" or format.lower() == \"geo\":\n labs_list = []\n for l in fablabs:\n single = fablabs[l].__dict__\n single_lab = Feature(\n type=\"Feature\",\n geometry=Point((single[\"latitude\"], single[\"longitude\"])),\n properties=single)\n labs_list.append(single_lab)\n output = dumps(FeatureCollection(labs_list))\n # Return a Pandas DataFrame\n elif format.lower() == \"pandas\" or format.lower() == \"dataframe\":\n output = {}\n for j in fablabs:\n output[j] = fablabs[j].__dict__\n # Transform the dict into a Pandas DataFrame\n output = pd.DataFrame.from_dict(output)\n output = output.transpose()\n # Return an object\n elif format.lower() == \"object\" or format.lower() == \"obj\":\n output = fablabs\n # Default: return an oject\n else:\n output = fablabs\n # Return a proper json\n if format.lower() == \"json\":\n output = json.dumps(output)\n return output\n",
"def get_labs(format):\n \"\"\"Gets data from all labs from makeinitaly.foundation.\"\"\"\n\n labs = []\n\n # Get the first page of data\n wiki = MediaWiki(makeinitaly__foundation_api_url)\n wiki_response = wiki.call(\n {'action': 'query',\n 'list': 'categorymembers',\n 'cmtitle': 'Category:Italian_FabLabs',\n 'cmlimit': '500'})\n if \"query-continue\" in wiki_response:\n nextpage = wiki_response[\n \"query-continue\"][\"categorymembers\"][\"cmcontinue\"]\n\n urls = []\n for i in wiki_response[\"query\"][\"categorymembers\"]:\n urls.append(i[\"title\"].replace(\" \", \"_\"))\n\n # Load all the Labs in the first page\n for i in urls:\n current_lab = get_single_lab(i)\n labs.append(current_lab)\n\n # Load all the Labs from the other pages\n while \"query-continue\" in wiki_response:\n wiki = MediaWiki(makeinitaly__foundation_api_url)\n wiki_response = wiki.call({'action': 'query',\n 'list': 'categorymembers',\n 'cmtitle': 'Category:Hackerspace',\n 'cmlimit': '500',\n \"cmcontinue\": nextpage})\n\n urls = []\n for i in wiki_response[\"query\"][\"categorymembers\"]:\n urls.append(i[\"title\"].replace(\" \", \"_\"))\n\n # Load all the Labs\n for i in urls:\n current_lab = get_single_lab(i, data_format)\n labs.append(current_lab)\n\n if \"query-continue\" in wiki_response:\n nextpage = wiki_response[\n \"query-continue\"][\"categorymembers\"][\"cmcontinue\"]\n else:\n break\n\n # Transform the list into a dictionary\n labs_dict = {}\n for j, k in enumerate(labs):\n labs_dict[j] = k.__dict__\n\n # Return a dictiornary / json\n if format.lower() == \"dict\" or format.lower() == \"json\":\n output = labs_dict\n # Return a geojson\n elif format.lower() == \"geojson\" or format.lower() == \"geo\":\n labs_list = []\n for l in labs_dict:\n single = labs_dict[l].__dict__\n single_lab = Feature(\n type=\"Feature\",\n geometry=Point((single[\"latitude\"], single[\"longitude\"])),\n properties=single)\n labs_list.append(single_lab)\n output = dumps(FeatureCollection(labs_list))\n # 
Return a Pandas DataFrame\n elif format.lower() == \"pandas\" or format.lower() == \"dataframe\":\n output = {}\n for j in labs_dict:\n output[j] = labs_dict[j].__dict__\n # Transform the dict into a Pandas DataFrame\n output = pd.DataFrame.from_dict(output)\n output = output.transpose()\n # Return an object\n elif format.lower() == \"object\" or format.lower() == \"obj\":\n output = labs\n # Default: return an object\n else:\n output = labs\n # Return a proper json\n if format.lower() == \"json\":\n output = json.dumps(labs_dict)\n return output\n"
] |
# -*- encoding: utf-8 -*-
#
# Rebuild a timeline of makerlabs
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
# Import all the mapped platforms
import diybio_org
import fablabs_io
import makeinitaly_foundation
import hackaday_io
import hackerspaces_org
import makery_info
import nesta
import techshop_ws
import pandas as pd
def get_timeline(source):
    """Rebuild a timeline of the history of makerlabs.

    Args:
        source: name of the platform to load ("diybio.org", "fablabs_io",
            "makeinitaly_foundation", "hackaday_io", "hackerspaces_org",
            "makery_info", "nesta") or "all" (not implemented yet).

    Returns:
        A pandas DataFrame with one row per lab; the time-related columns
        (whois/wayback/twitter/facebook) are left empty for now (see the
        TODOs at the bottom).
    """
    # Columns of the timeseries dataframe.
    timeline_format = ["name", "type", "source", "country", "city", "latitude",
                       "longitude", "website_url", "twitter_url",
                       "facebook_page_url", "facebook_group_url",
                       "whois_start", "whois_end", "wayback_start",
                       "wayback_end", "twitter_start", "twitter_end",
                       "facebook_start", "facebook_end"]
    # Get data from the requested platform.
    # BUG FIX: the original loop iterated an undefined name (labs_data)
    # and accessed dict entries as if they were objects.
    platforms = {"diybio.org": diybio_org,
                 "fablabs_io": fablabs_io,
                 "makeinitaly_foundation": makeinitaly_foundation,
                 "hackaday_io": hackaday_io,
                 "hackerspaces_org": hackerspaces_org,
                 "makery_info": makery_info,
                 "nesta": nesta}
    key = source.lower()
    if key in platforms:
        data = platforms[key].get_labs(format="dict")
    else:
        # "all" and unknown sources: nothing loaded yet.
        data = {}
    # Fill the dataframe with basic details. get_labs(format="dict")
    # returns a dict keyed by lab slug whose values are plain attribute
    # dicts, so fields are read with .get() (missing ones become None).
    rows = []
    for lab in data.values():
        rows.append({"name": lab.get("name"),
                     "type": lab.get("lab_type"),
                     "source": lab.get("source"),
                     "country": lab.get("country"),
                     "city": lab.get("city"),
                     "latitude": lab.get("latitude"),
                     "longitude": lab.get("longitude"),
                     "website_url": lab.get("url")})
    # BUG FIX: build the frame from the collected rows with columns= so
    # the names label the columns instead of becoming data, and keep the
    # result (DataFrame.append returned a new object that was discarded).
    timeline = pd.DataFrame(rows, columns=timeline_format)
    # Get time data from platforms, whenever possible
    # Get domain data (whois)
    # Get subdomain data (Internet Archive)
    # Get social media data (Twitter)
    # Get social media data (Facebook)
    return timeline
if __name__ == "__main__":
    # Demo entry point: rebuild and print the fablabs.io timeline.
    print get_timeline("fablabs_io")
|
openp2pdesign/makerlabs
|
makerlabs/timeline.py
|
get_timeline
|
python
|
def get_timeline(source):
    """Rebuild a timeline of the history of makerlabs.

    Args:
        source: name of the platform to load ("diybio.org", "fablabs_io",
            "makeinitaly_foundation", "hackaday_io", "hackerspaces_org",
            "makery_info", "nesta") or "all" (not implemented yet).

    Returns:
        A pandas DataFrame with one row per lab; the time-related columns
        (whois/wayback/twitter/facebook) are left empty for now (see the
        TODOs at the bottom).
    """
    # Columns of the timeseries dataframe.
    timeline_format = ["name", "type", "source", "country", "city", "latitude",
                       "longitude", "website_url", "twitter_url",
                       "facebook_page_url", "facebook_group_url",
                       "whois_start", "whois_end", "wayback_start",
                       "wayback_end", "twitter_start", "twitter_end",
                       "facebook_start", "facebook_end"]
    # Get data from the requested platform.
    # BUG FIX: the original loop iterated an undefined name (labs_data)
    # and accessed dict entries as if they were objects.
    platforms = {"diybio.org": diybio_org,
                 "fablabs_io": fablabs_io,
                 "makeinitaly_foundation": makeinitaly_foundation,
                 "hackaday_io": hackaday_io,
                 "hackerspaces_org": hackerspaces_org,
                 "makery_info": makery_info,
                 "nesta": nesta}
    key = source.lower()
    if key in platforms:
        data = platforms[key].get_labs(format="dict")
    else:
        # "all" and unknown sources: nothing loaded yet.
        data = {}
    # Fill the dataframe with basic details. get_labs(format="dict")
    # returns a dict keyed by lab slug whose values are plain attribute
    # dicts, so fields are read with .get() (missing ones become None).
    rows = []
    for lab in data.values():
        rows.append({"name": lab.get("name"),
                     "type": lab.get("lab_type"),
                     "source": lab.get("source"),
                     "country": lab.get("country"),
                     "city": lab.get("city"),
                     "latitude": lab.get("latitude"),
                     "longitude": lab.get("longitude"),
                     "website_url": lab.get("url")})
    # BUG FIX: build the frame from the collected rows with columns= so
    # the names label the columns instead of becoming data, and keep the
    # result (DataFrame.append returned a new object that was discarded).
    timeline = pd.DataFrame(rows, columns=timeline_format)
    # Get time data from platforms, whenever possible
    # Get domain data (whois)
    # Get subdomain data (Internet Archive)
    # Get social media data (Twitter)
    # Get social media data (Facebook)
    return timeline
|
Rebuild a timeline of the history of makerlabs.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/timeline.py#L42-L105
|
[
"def get_labs(format, open_cage_api_key):\n \"\"\"Gets DIYBio Lab data from diybio.org.\"\"\"\n\n diybiolabs_soup = data_from_diybio_org()\n diybiolabs = {}\n\n rows_list = []\n continents_dict = {}\n continents_order = 0\n ranges_starting_points = []\n\n # Load all the DIYBio Labs\n # By first parsing the html\n\n # Parse table rows\n for row in diybiolabs_soup.select(\"table tr\"):\n cells = row.find_all('td')\n rows_list.append(cells)\n\n # Find the continents in order to iterate over their children td\n for k, row in enumerate(rows_list):\n for col in row:\n if col.find('h3'):\n for h3 in col.findAll('h3'):\n ranges_starting_points.append(k)\n continents_dict[continents_order] = h3.get_text()\n continents_order += 1\n\n # Find the rows of each continent\n ranges = {}\n for k, j in enumerate(reversed(ranges_starting_points)):\n if k < len(ranges_starting_points) - 1:\n ranges[k] = {\"start\": ranges_starting_points[k],\n \"end\": ranges_starting_points[k + 1]}\n else:\n # The last continent, Oceania\n ranges[k] = {\"start\": ranges_starting_points[k],\n \"end\": len(rows_list)}\n\n # Iterate over the range of each continent to find the Labs\n for i in ranges:\n # The +1 just avoids the H3 line\n for j in range(ranges[i][\"start\"] + 1, ranges[i][\"end\"]):\n # Avoid empty rows by measuring the lenght of the content of each cell and with a boolean check\n rules = [len(n) == 0 for n in rows_list[j]]\n if False in rules:\n current_lab = DiyBioLab()\n current_lab.city = rows_list[j][1].contents[0].encode('utf-8')\n # Data from the USA is not really well formatted\n if continents_dict[i] == \"USA-EAST\" or continents_dict[\n i] == \"USA-WEST\":\n current_lab.state = rows_list[j][2].contents[0].replace(\n \" \", \"\").encode('utf-8')\n else:\n current_lab.country_code = rows_list[j][2].contents[\n 0].encode('utf-8')\n current_lab.url = rows_list[j][3].contents[0].attrs['href']\n # Each lab is identified by the simplified url\n slug = current_lab.url\n if \"http://\" 
in slug:\n slug = slug.replace(\"http://\", \"\")\n elif \"https://\" in slug:\n slug = slug.replace(\"https://\", \"\")\n if \"www.\" in slug:\n slug = slug.replace(\"www.\", \"\")\n current_lab.name = slug\n current_lab.slug = slug\n\n # Data from the USA is not really well formatted\n if continents_dict[i] == \"USA-EAST\" or continents_dict[i] == \"USA-WEST\":\n current_lab.continent = \"North America\"\n current_lab.country_code = \"USA\"\n current_lab.country = \"United States of America\"\n current_lab.state = us.states.lookup(\n current_lab.state).name\n\n # Get address from city\n address = get_location(query=current_lab.city, format=\"direct\", api_key=open_cage_api_key)\n current_lab.continent = address[\"continent\"]\n current_lab.latitude = address[\"latitude\"]\n current_lab.longitude = address[\"longitude\"]\n current_lab.address_1 = address[\"address_1\"]\n current_lab.country = address[\"country\"]\n current_lab.country_code = address[\"country_code\"]\n current_lab.latitude = address[\"latitude\"]\n current_lab.longitude = address[\"longitude\"]\n current_lab.county = address[\"county\"]\n current_lab.postal_code = address[\"postal_code\"]\n current_lab.state = address[\"state\"]\n\n # Add the lab to the list\n diybiolabs[slug] = current_lab\n del current_lab\n\n # Return a dictionary / json\n if format.lower() == \"dict\":\n output = {}\n for j in diybiolabs:\n output[j] = diybiolabs[j].__dict__\n # Return a geojson\n elif format.lower() == \"geojson\" or format.lower() == \"geo\":\n labs_list = []\n for l in diybiolabs:\n single = diybiolabs[l].__dict__\n single_lab = Feature(\n type=\"Feature\",\n geometry=Point((single[\"latitude\"], single[\"longitude\"])),\n properties=single)\n labs_list.append(single_lab)\n output = dumps(FeatureCollection(labs_list))\n # Return a Pandas DataFrame\n elif format.lower() == \"pandas\" or format.lower() == \"dataframe\":\n output = {}\n for j in diybiolabs:\n output[j] = diybiolabs[j].__dict__\n # Transform 
the dict into a Pandas DataFrame\n output = pd.DataFrame.from_dict(output)\n # Put labs names as the index, to make it coherent with other APIs\n output = output.transpose()\n # Return an object\n elif format.lower() == \"object\" or format.lower() == \"obj\":\n output = diybiolabs\n # Default: return an oject\n else:\n output = diybiolabs\n # Return a proper json\n if format.lower() == \"json\":\n output = json.dumps(diybiolabs)\n return output\n"
] |
# -*- encoding: utf-8 -*-
#
# Rebuild a timeline of makerlabs
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
# Import all the mapped platforms
import diybio_org
import fablabs_io
import makeinitaly_foundation
import hackaday_io
import hackerspaces_org
import makery_info
import nesta
import techshop_ws
import pandas as pd
def get_multiple_data():
    """Get data from all the platforms listed in makerlabs.

    Returns:
        A dict keyed by platform name; each value is that platform's
        get_labs(format="dict") result.
    """
    platforms = {"diybio_org": diybio_org,
                 "fablabs_io": fablabs_io,
                 "makeinitaly_foundation": makeinitaly_foundation,
                 "hackaday_io": hackaday_io,
                 "hackerspaces_org": hackerspaces_org,
                 "makery_info": makery_info,
                 "nesta": nesta}
    # techshop_ws is mapped but currently disabled, as in the original.
    all_labs = {}
    for name, module in platforms.items():
        all_labs[name] = module.get_labs(format="dict")
    return all_labs
if __name__ == "__main__":
    # NOTE(review): get_timeline is not defined in this module (only
    # get_multiple_data is) -- this demo call looks copied from
    # timeline.py; verify the intended entry point.
    print get_timeline("fablabs_io")
|
openp2pdesign/makerlabs
|
makerlabs/nesta.py
|
get_labs
|
python
|
def get_labs(format):
    """Gets current UK Makerspaces data as listed by NESTA.

    Args:
        format: output format; "dict"/"json", "geojson"/"geo",
            "pandas"/"dataframe", "object"/"obj" (any other value also
            falls back to objects).

    Returns:
        The makerspaces in the requested format.
    """
    ukmakerspaces_data = data_from_nesta()
    ukmakerspaces = {}
    # Iterate over csv rows
    for index, row in ukmakerspaces_data.iterrows():
        current_lab = UKMakerspace()
        # Strip carriage returns left over from the CSV export.
        current_lab.address_1 = row["Address"].replace("\r", " ")
        current_lab.address_2 = row["Region"].replace("\r", " ") + " - " + row["Area"].replace("\r", " ")
        # NOTE(review): latitude/longitude are empty strings here, so the
        # geojson branch below emits degenerate Points -- confirm whether
        # geocoding was meant to happen at this stage.
        current_lab.city = ""
        current_lab.county = ""
        current_lab.email = row["Email address"]
        current_lab.latitude = ""
        current_lab.longitude = ""
        current_lab.links = ""
        current_lab.name = row["Name of makerspace"]
        current_lab.phone = row["Phone number"]
        current_lab.postal_code = row["Postcode"]
        current_lab.url = row["Website / URL"]
        # Add the lab, with a slug from the name
        ukmakerspaces[current_lab.name] = current_lab
    # Return a dictionary / json
    if format.lower() == "dict" or format.lower() == "json":
        output = {}
        for j in ukmakerspaces:
            output[j] = ukmakerspaces[j].__dict__
    # Return a geojson
    elif format.lower() == "geojson" or format.lower() == "geo":
        labs_list = []
        for l in ukmakerspaces:
            single = ukmakerspaces[l].__dict__
            single_lab = Feature(
                type="Feature",
                geometry=Point((single["latitude"], single["longitude"])),
                properties=single)
            labs_list.append(single_lab)
        output = dumps(FeatureCollection(labs_list))
    # Return a Pandas DataFrame
    elif format.lower() == "pandas" or format.lower() == "dataframe":
        output = {}
        for j in ukmakerspaces:
            output[j] = ukmakerspaces[j].__dict__
        # Transform the dict into a Pandas DataFrame
        output = pd.DataFrame.from_dict(output)
        # Labs as rows, attributes as columns.
        output = output.transpose()
    # Return an object
    elif format.lower() == "object" or format.lower() == "obj":
        output = ukmakerspaces
    # Default: return an object
    else:
        output = ukmakerspaces
    # Return a proper json
    if format.lower() == "json":
        output = json.dumps(output)
    return output
|
Gets current UK Makerspaces data as listed by NESTA.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/nesta.py#L47-L105
|
[
"def data_from_nesta():\n \"\"\"Read data from the GitHub repo.\"\"\"\n\n data = pd.read_csv(nesta_uk_url)\n\n return data\n"
] |
# -*- encoding: utf-8 -*-
#
# Access data from NESTA at https://github.com/nesta-uk/UK-makerspaces
# Data license: CC Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
from classes import Lab
import json
import pandas as pd
from geojson import dumps, Feature, Point, FeatureCollection
from geopy.geocoders import Nominatim
import pandas as pd
# Geocoding variable
geolocator = Nominatim()
# Endpoints
nesta_uk_url = "https://raw.githubusercontent.com/nesta-uk/UK-makerspaces/master/ukmakerspacesidentifiabledata.csv"
class UKMakerspace(Lab):
    """Represents a UK Makerspace from the NESTA research, in a simplified way."""
    def __init__(self):
        # All NESTA entries share the same provenance and location
        # metadata; per-lab fields are filled in by get_labs().
        self.source = "NESTA"
        self.lab_type = "UK Makerspace from the NESTA"
        self.continent = "Europe"
        self.country_code = "UK"
        self.country = "United Kingdom"
def data_from_nesta():
    """Read the NESTA UK makerspaces CSV from the GitHub repo.

    Returns:
        A pandas DataFrame with one row per makerspace.
    """
    return pd.read_csv(nesta_uk_url)
def labs_count():
    """Gets the number of current UK Makerspaces listed by NESTA."""
    return len(get_labs("object"))
if __name__ == "__main__":
    # No CLI behaviour; the module is meant to be imported.
    pass
|
openp2pdesign/makerlabs
|
makerlabs/makery_info.py
|
get_labs
|
python
|
def get_labs(format):
    """Gets Lab data from makery.info.

    Args:
        format: output format; "dict"/"json", "geojson"/"geo",
            "pandas"/"dataframe", "object"/"obj" (any other value also
            falls back to objects).

    Returns:
        The labs in the requested format.
    """
    labs_json = data_from_makery_info(makery_info_labs_api_url)
    labs = {}
    # Load all the FabLabs
    for i in labs_json["labs"]:
        current_lab = MakeryLab()
        current_lab.address_1 = i["address_1"]
        current_lab.address_2 = i["address_2"]
        current_lab.address_notes = i["address_notes"]
        current_lab.avatar = i["avatar"]
        current_lab.blurb = i["blurb"]
        current_lab.capabilities = i["capabilities"]
        current_lab.city = i["city"]
        current_lab.country_code = i["country_code"]
        current_lab.county = i["county"]
        current_lab.description = i["description"]
        current_lab.email = i["email"]
        current_lab.header_image_src = i["header_image_src"]
        current_lab.id = i["id"]
        current_lab.kind_name = i["kind_name"]
        # Some labs do not have coordinates: geocode the full address,
        # then just the city, then give up with 0,0.
        if i["latitude"] is None or i["longitude"] is None:
            address = i["address_1"] + i["city"] + i["country_code"]
            try:
                location = geolocator.geocode(address)
                current_lab.latitude = location.latitude
                current_lab.longitude = location.longitude
            # Narrowed from a bare except: a bare except also swallows
            # KeyboardInterrupt/SystemExit.
            except Exception:
                try:
                    location = geolocator.geocode(i["city"])
                    current_lab.latitude = location.latitude
                    current_lab.longitude = location.longitude
                except Exception:
                    # For labs without a city, add 0,0 as coordinates
                    current_lab.latitude = 0.0
                    current_lab.longitude = 0.0
        else:
            current_lab.latitude = i["latitude"]
            current_lab.longitude = i["longitude"]
        current_lab.links = i["links"]
        current_lab.name = i["name"]
        current_lab.parent_id = i["parent_id"]
        current_lab.phone = i["phone"]
        current_lab.postal_code = i["postal_code"]
        current_lab.slug = i["slug"]
        current_lab.url = i["url"]
        # Add the lab
        labs[i["slug"]] = current_lab
    # Return a dictionary / json
    if format.lower() == "dict" or format.lower() == "json":
        output = {}
        for j in labs:
            output[j] = labs[j].__dict__
    # Return a geojson
    elif format.lower() == "geojson" or format.lower() == "geo":
        labs_list = []
        for l in labs:
            single = labs[l].__dict__
            single_lab = Feature(
                type="Feature",
                geometry=Point((single["latitude"], single["longitude"])),
                properties=single)
            labs_list.append(single_lab)
        output = dumps(FeatureCollection(labs_list))
    # Return a Pandas DataFrame
    elif format.lower() == "pandas" or format.lower() == "dataframe":
        output = {}
        # BUG FIX: this branch previously iterated labs_list, which is
        # only defined in the geojson branch (NameError here); iterate
        # the labs dict like the sibling modules do.
        for j in labs:
            output[j] = labs[j].__dict__
        # Transform the dict into a Pandas DataFrame
        output = pd.DataFrame.from_dict(output)
        output = output.transpose()
    # Return an object
    elif format.lower() == "object" or format.lower() == "obj":
        output = labs
    # Default: return an object
    else:
        output = labs
    # Return a proper json
    if format.lower() == "json":
        output = json.dumps(output)
    return output
|
Gets Lab data from makery.info.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/makery_info.py#L44-L129
|
[
"def data_from_makery_info(endpoint):\n \"\"\"Gets data from makery.info.\"\"\"\n\n data = requests.get(endpoint).json()\n\n return data\n"
] |
# -*- encoding: utf-8 -*-
#
# Access data from makery.info
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
from classes import Lab
import json
import requests
from geojson import dumps, Feature, Point, FeatureCollection
from geopy.geocoders import Nominatim
import pandas as pd
# Geocoding variable
geolocator = Nominatim()
# Endpoints
makery_info_labs_api_url = "http://www.makery.info/api/labs/"
class MakeryLab(Lab):
    """Represents a Lab as it is described on makery.info."""
    def __init__(self):
        # Provenance metadata shared by every makery.info entry;
        # per-lab fields are filled in by get_labs().
        self.source = "makery.info"
        self.lab_type = "Lab on makery.info"
def data_from_makery_info(endpoint):
    """Fetch and JSON-decode data from a makery.info endpoint.

    Args:
        endpoint: full URL of the makery.info API endpoint.

    Returns:
        The decoded JSON payload.
    """
    response = requests.get(endpoint)
    return response.json()
def labs_count():
    """Gets the number of current Labs listed on makery.info."""
    return len(data_from_makery_info(makery_info_labs_api_url)["labs"])
if __name__ == "__main__":
    # No CLI behaviour; the module is meant to be imported.
    pass
|
openp2pdesign/makerlabs
|
makerlabs/hackaday_io.py
|
get_labs
|
python
|
def get_labs(format):
    """Gets Hackerspaces data from hackaday.io.

    Args:
        format: output format; "dict"/"json", "geojson"/"geo",
            "pandas"/"dataframe", "object"/"obj" (any other value also
            falls back to objects).

    Returns:
        The hackerspaces in the requested format.
    """
    hackerspaces_json = data_from_hackaday_io(hackaday_io_labs_map_url)
    hackerspaces = {}
    # Load all the Hackerspaces
    for i in hackerspaces_json:
        current_lab = Hackerspace()
        current_lab.id = i["id"]
        # NOTE(review): assumes i["id"] is a string -- string
        # concatenation raises TypeError for an int id; verify the API
        # payload type.
        current_lab.url = "https://hackaday.io/hackerspace/" + current_lab.id
        current_lab.name = i["name"]
        # Prefer the long description, fall back to the summary.
        if len(i["description"]) != 0:
            current_lab.description = i["description"]
        elif len(i["summary"]) != 0:
            current_lab.description = i["summary"]
        current_lab.created_at = i["moments"]["exact"]
        # Check if there are coordinates
        if i["latlon"] is not None:
            latlon = json.loads(i["latlon"])
            current_lab.latitude = latlon["lat"]
            current_lab.longitude = latlon["lng"]
            # Reverse-geocode country, county and city from coordinates.
            # NOTE(review): the raw Nominatim "address" dict does not
            # always contain country/state_district/city/postcode keys
            # -- missing keys raise KeyError here; confirm for all labs.
            country = geolocator.reverse(
                [latlon["lat"], latlon["lng"]])
            current_lab.country = country.raw[
                "address"]["country"]
            current_lab.address = country.raw["display_name"]
            current_lab.address_1 = country.raw["display_name"]
            current_lab.country_code = country.raw[
                "address"]["country_code"]
            current_lab.county = country.raw[
                "address"]["state_district"]
            current_lab.city = country.raw[
                "address"]["city"]
            current_lab.postal_code = country.raw[
                "address"]["postcode"]
        else:
            # For labs without a location or coordinates
            # add 0,0 as coordinates
            current_lab.latitude = 0.0
            current_lab.longitude = 0.0
        # Add the lab
        hackerspaces[i["name"]] = current_lab
    # Return a dictionary / json
    if format.lower() == "dict" or format.lower() == "json":
        output = {}
        for j in hackerspaces:
            output[j] = hackerspaces[j].__dict__
    # Return a geojson
    elif format.lower() == "geojson" or format.lower() == "geo":
        labs_list = []
        for l in hackerspaces:
            single = hackerspaces[l].__dict__
            single_lab = Feature(
                type="Feature",
                geometry=Point((single["latitude"], single["longitude"])),
                properties=single)
            labs_list.append(single_lab)
        output = dumps(FeatureCollection(labs_list))
    # Return a Pandas DataFrame
    elif format.lower() == "pandas" or format.lower() == "dataframe":
        output = {}
        for j in hackerspaces:
            output[j] = hackerspaces[j].__dict__
        # Transform the dict into a Pandas DataFrame
        output = pd.DataFrame.from_dict(output)
        output = output.transpose()
    # Return an object
    elif format.lower() == "object" or format.lower() == "obj":
        output = hackerspaces
    # Default: return an object
    else:
        output = hackerspaces
    # Return a proper json
    if format.lower() == "json":
        output = json.dumps(output)
    return output
|
Gets Hackerspaces data from hackaday.io.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/hackaday_io.py#L57-L137
|
[
"def data_from_hackaday_io(endpoint):\n \"\"\"Gets data from hackaday.io.\"\"\"\n\n data = requests.get(endpoint).json()\n\n return data\n"
] |
# -*- encoding: utf-8 -*-
#
# Access data from hackaday.io
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
from classes import Lab
import json
import requests
from geojson import dumps, Feature, Point, FeatureCollection
from geopy.geocoders import Nominatim
import pandas as pd
# Geocoding variable
geolocator = Nominatim()
# Endpoints
# The documented endpoint does not have coordinates,
# the undocumented one has them, so for the moment we use the latter.
# The undocumented endpoint does not need API keys or OAuth.
# hackaday.io API documentation:
# https://dev.hackaday.io/doc/api/get-pages
# Register your app for the API key here:
# https://dev.hackaday.io/applications
client_id = "..."
client_secret = "..."
API_key = "..."
# Documented endpoint for the list of hackerspaces
hackaday_io_labs_api_url = "https://api.hackaday.io/v1/pages/hackerspaces?api_key=" + API_key
# Undocumented endpoint for the map of hackerspaces
hackaday_io_labs_map_url = "http://hackaday.io/api/location/hackerspaces"
class Hackerspace(Lab):
    """Represents a Hackerspace as it is described on hackaday.io."""
    def __init__(self):
        # Provenance metadata shared by every hackaday.io entry;
        # per-lab fields are filled in by get_labs().
        self.source = "hackaday.io"
        self.lab_type = "Hackerspace"
def data_from_hackaday_io(endpoint):
    """Fetch and JSON-decode data from a hackaday.io endpoint.

    Args:
        endpoint: full URL of the hackaday.io API endpoint.

    Returns:
        The decoded JSON payload.
    """
    response = requests.get(endpoint)
    return response.json()
def labs_count():
    """Gets the number of current Hackerspaces listed on hackaday.io."""
    # NOTE(review): this indexes the documented pages endpoint's response
    # with ["labs"], while get_labs() iterates the undocumented map
    # endpoint's response directly as a list -- verify the pages
    # endpoint's response schema (and that API_key is configured).
    hackerspaces = data_from_hackaday_io(hackaday_io_labs_api_url)
    return len(hackerspaces["labs"])
if __name__ == "__main__":
    # No CLI behaviour; the module is meant to be imported.
    pass
|
openp2pdesign/makerlabs
|
makerlabs/diybio_org.py
|
data_from_diybio_org
|
python
|
def data_from_diybio_org():
    """Scrapes data from diybio.org.

    Returns:
        A BeautifulSoup document on success, or an error-message string
        when the page does not answer with HTTP 200.
    """
    response = requests.get(diy_bio_labs_url)
    if response.status_code != 200:
        return "There was an error while accessing data on diybio.org."
    # Strip the non-breaking spaces that break the html source before
    # handing it to the parser.
    return BeautifulSoup(response.text.replace(u'\xa0', u''), "lxml")
|
Scrapes data from diybio.org.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/diybio_org.py#L34-L45
| null |
# -*- encoding: utf-8 -*-
#
# Access data from diybio.org
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
from classes import Lab
from utils import get_location
import json
from bs4 import BeautifulSoup
import requests
from geojson import dumps, Feature, Point, FeatureCollection
import us
import pandas as pd
# Endpoints
diy_bio_labs_url = "https://diybio.org/local/"
class DiyBioLab(Lab):
    """Represents a DIYBio Lab as it is described on diybio.org."""
    def __init__(self):
        # Provenance metadata shared by every diybio.org entry;
        # per-lab fields are filled in by get_labs().
        self.source = "diybio.org"
        self.lab_type = "DIYBio Lab"
def get_labs(format, open_cage_api_key=None):
    """Gets DIYBio Lab data from diybio.org.

    Args:
        format: output format; "dict"/"json", "geojson"/"geo",
            "pandas"/"dataframe", "object"/"obj" (any other value also
            falls back to objects).
        open_cage_api_key: OpenCage geocoder API key forwarded to
            get_location(). Defaults to None so zero-argument callers
            such as labs_count() keep working; geocoding requires a
            real key.

    Returns:
        The labs in the requested format.
    """
    diybiolabs_soup = data_from_diybio_org()
    diybiolabs = {}

    rows_list = []
    continents_dict = {}
    continents_order = 0
    ranges_starting_points = []

    # Load all the DIYBio Labs
    # By first parsing the html

    # Parse table rows
    for row in diybiolabs_soup.select("table tr"):
        cells = row.find_all('td')
        rows_list.append(cells)

    # Find the continents in order to iterate over their children td
    for k, row in enumerate(rows_list):
        for col in row:
            if col.find('h3'):
                for h3 in col.findAll('h3'):
                    ranges_starting_points.append(k)
                    continents_dict[continents_order] = h3.get_text()
                    continents_order += 1

    # Find the rows of each continent
    ranges = {}
    for k, j in enumerate(reversed(ranges_starting_points)):
        if k < len(ranges_starting_points) - 1:
            ranges[k] = {"start": ranges_starting_points[k],
                         "end": ranges_starting_points[k + 1]}
        else:
            # The last continent, Oceania
            ranges[k] = {"start": ranges_starting_points[k],
                         "end": len(rows_list)}

    # Iterate over the range of each continent to find the Labs
    for i in ranges:
        # The +1 just avoids the H3 line
        for j in range(ranges[i]["start"] + 1, ranges[i]["end"]):
            # Skip empty rows: keep the row only if at least one cell
            # has content.
            rules = [len(n) == 0 for n in rows_list[j]]
            if False in rules:
                current_lab = DiyBioLab()
                current_lab.city = rows_list[j][1].contents[0].encode('utf-8')
                # Data from the USA is not really well formatted
                if continents_dict[i] == "USA-EAST" or continents_dict[
                        i] == "USA-WEST":
                    current_lab.state = rows_list[j][2].contents[0].replace(
                        " ", "").encode('utf-8')
                else:
                    current_lab.country_code = rows_list[j][2].contents[
                        0].encode('utf-8')
                current_lab.url = rows_list[j][3].contents[0].attrs['href']
                # Each lab is identified by the simplified url
                slug = current_lab.url
                if "http://" in slug:
                    slug = slug.replace("http://", "")
                elif "https://" in slug:
                    slug = slug.replace("https://", "")
                if "www." in slug:
                    slug = slug.replace("www.", "")
                current_lab.name = slug
                current_lab.slug = slug

                # Data from the USA is not really well formatted
                if continents_dict[i] == "USA-EAST" or continents_dict[i] == "USA-WEST":
                    current_lab.continent = "North America"
                    current_lab.country_code = "USA"
                    current_lab.country = "United States of America"
                    current_lab.state = us.states.lookup(
                        current_lab.state).name

                # Get the full address by geocoding the city
                # (duplicate latitude/longitude assignments removed).
                address = get_location(query=current_lab.city,
                                       format="direct",
                                       api_key=open_cage_api_key)
                current_lab.continent = address["continent"]
                current_lab.address_1 = address["address_1"]
                current_lab.country = address["country"]
                current_lab.country_code = address["country_code"]
                current_lab.latitude = address["latitude"]
                current_lab.longitude = address["longitude"]
                current_lab.county = address["county"]
                current_lab.postal_code = address["postal_code"]
                current_lab.state = address["state"]

                # Add the lab to the list
                diybiolabs[slug] = current_lab
                del current_lab

    # Return a dictionary / json
    # BUG FIX: "json" previously fell through to the object branch and
    # the final json.dumps() then failed on non-serializable DiyBioLab
    # instances; serialize the plain-dict representation instead, as the
    # sibling modules do.
    if format.lower() == "dict" or format.lower() == "json":
        output = {}
        for j in diybiolabs:
            output[j] = diybiolabs[j].__dict__
    # Return a geojson
    elif format.lower() == "geojson" or format.lower() == "geo":
        labs_list = []
        for l in diybiolabs:
            single = diybiolabs[l].__dict__
            single_lab = Feature(
                type="Feature",
                geometry=Point((single["latitude"], single["longitude"])),
                properties=single)
            labs_list.append(single_lab)
        output = dumps(FeatureCollection(labs_list))
    # Return a Pandas DataFrame
    elif format.lower() == "pandas" or format.lower() == "dataframe":
        output = {}
        for j in diybiolabs:
            output[j] = diybiolabs[j].__dict__
        # Transform the dict into a Pandas DataFrame
        output = pd.DataFrame.from_dict(output)
        # Put labs names as the index, to make it coherent with other APIs
        output = output.transpose()
    # Return an object
    elif format.lower() == "object" or format.lower() == "obj":
        output = diybiolabs
    # Default: return an object
    else:
        output = diybiolabs
    # Return a proper json
    if format.lower() == "json":
        output = json.dumps(output)
    return output
def labs_count(open_cage_api_key=None):
    """Gets the number of current DIYBio Labs listed on diybio.org.

    Args:
        open_cage_api_key: OpenCage geocoder API key forwarded to
            get_labs(); new optional parameter, zero-argument calls
            still work.

    Returns:
        The number of labs.
    """
    # BUG FIX: get_labs() takes (format, open_cage_api_key); the previous
    # single-argument call raised a TypeError.
    diybiolabs = get_labs("object", open_cage_api_key)
    return len(diybiolabs)
if __name__ == "__main__":
    # No CLI behaviour; the module is meant to be imported.
    pass
|
openp2pdesign/makerlabs
|
makerlabs/diybio_org.py
|
get_labs
|
python
|
def get_labs(format, open_cage_api_key=None):
    """Gets DIYBio Lab data from diybio.org.

    Args:
        format: output format; "dict"/"json", "geojson"/"geo",
            "pandas"/"dataframe", "object"/"obj" (any other value also
            falls back to objects).
        open_cage_api_key: OpenCage geocoder API key forwarded to
            get_location(). Defaults to None so zero-argument callers
            such as labs_count() keep working; geocoding requires a
            real key.

    Returns:
        The labs in the requested format.
    """
    diybiolabs_soup = data_from_diybio_org()
    diybiolabs = {}

    rows_list = []
    continents_dict = {}
    continents_order = 0
    ranges_starting_points = []

    # Load all the DIYBio Labs
    # By first parsing the html

    # Parse table rows
    for row in diybiolabs_soup.select("table tr"):
        cells = row.find_all('td')
        rows_list.append(cells)

    # Find the continents in order to iterate over their children td
    for k, row in enumerate(rows_list):
        for col in row:
            if col.find('h3'):
                for h3 in col.findAll('h3'):
                    ranges_starting_points.append(k)
                    continents_dict[continents_order] = h3.get_text()
                    continents_order += 1

    # Find the rows of each continent
    ranges = {}
    for k, j in enumerate(reversed(ranges_starting_points)):
        if k < len(ranges_starting_points) - 1:
            ranges[k] = {"start": ranges_starting_points[k],
                         "end": ranges_starting_points[k + 1]}
        else:
            # The last continent, Oceania
            ranges[k] = {"start": ranges_starting_points[k],
                         "end": len(rows_list)}

    # Iterate over the range of each continent to find the Labs
    for i in ranges:
        # The +1 just avoids the H3 line
        for j in range(ranges[i]["start"] + 1, ranges[i]["end"]):
            # Skip empty rows: keep the row only if at least one cell
            # has content.
            rules = [len(n) == 0 for n in rows_list[j]]
            if False in rules:
                current_lab = DiyBioLab()
                current_lab.city = rows_list[j][1].contents[0].encode('utf-8')
                # Data from the USA is not really well formatted
                if continents_dict[i] == "USA-EAST" or continents_dict[
                        i] == "USA-WEST":
                    current_lab.state = rows_list[j][2].contents[0].replace(
                        " ", "").encode('utf-8')
                else:
                    current_lab.country_code = rows_list[j][2].contents[
                        0].encode('utf-8')
                current_lab.url = rows_list[j][3].contents[0].attrs['href']
                # Each lab is identified by the simplified url
                slug = current_lab.url
                if "http://" in slug:
                    slug = slug.replace("http://", "")
                elif "https://" in slug:
                    slug = slug.replace("https://", "")
                if "www." in slug:
                    slug = slug.replace("www.", "")
                current_lab.name = slug
                current_lab.slug = slug

                # Data from the USA is not really well formatted
                if continents_dict[i] == "USA-EAST" or continents_dict[i] == "USA-WEST":
                    current_lab.continent = "North America"
                    current_lab.country_code = "USA"
                    current_lab.country = "United States of America"
                    current_lab.state = us.states.lookup(
                        current_lab.state).name

                # Get the full address by geocoding the city
                # (duplicate latitude/longitude assignments removed).
                address = get_location(query=current_lab.city,
                                       format="direct",
                                       api_key=open_cage_api_key)
                current_lab.continent = address["continent"]
                current_lab.address_1 = address["address_1"]
                current_lab.country = address["country"]
                current_lab.country_code = address["country_code"]
                current_lab.latitude = address["latitude"]
                current_lab.longitude = address["longitude"]
                current_lab.county = address["county"]
                current_lab.postal_code = address["postal_code"]
                current_lab.state = address["state"]

                # Add the lab to the list
                diybiolabs[slug] = current_lab
                del current_lab

    # Return a dictionary / json
    # BUG FIX: "json" previously fell through to the object branch and
    # the final json.dumps() then failed on non-serializable DiyBioLab
    # instances; serialize the plain-dict representation instead, as the
    # sibling modules do.
    if format.lower() == "dict" or format.lower() == "json":
        output = {}
        for j in diybiolabs:
            output[j] = diybiolabs[j].__dict__
    # Return a geojson
    elif format.lower() == "geojson" or format.lower() == "geo":
        labs_list = []
        for l in diybiolabs:
            single = diybiolabs[l].__dict__
            single_lab = Feature(
                type="Feature",
                geometry=Point((single["latitude"], single["longitude"])),
                properties=single)
            labs_list.append(single_lab)
        output = dumps(FeatureCollection(labs_list))
    # Return a Pandas DataFrame
    elif format.lower() == "pandas" or format.lower() == "dataframe":
        output = {}
        for j in diybiolabs:
            output[j] = diybiolabs[j].__dict__
        # Transform the dict into a Pandas DataFrame
        output = pd.DataFrame.from_dict(output)
        # Put labs names as the index, to make it coherent with other APIs
        output = output.transpose()
    # Return an object
    elif format.lower() == "object" or format.lower() == "obj":
        output = diybiolabs
    # Default: return an object
    else:
        output = diybiolabs
    # Return a proper json
    if format.lower() == "json":
        output = json.dumps(output)
    return output
|
Gets DIYBio Lab data from diybio.org.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/diybio_org.py#L48-L176
|
[
"def get_location(query, format, api_key):\n \"\"\"Get geographic data of a lab in a coherent way for all labs.\"\"\"\n\n # Play nice with the API...\n sleep(1)\n geolocator = OpenCage(api_key=api_key, timeout=10)\n\n # Variables for storing the data\n data = {\"city\": None,\n \"address_1\": None,\n \"postal_code\": None,\n \"country\": None,\n \"county\": None,\n \"state\": None,\n \"country_code\": None,\n \"latitude\": None,\n \"longitude\": None,\n \"continent\": None}\n road = \"\"\n number = \"\"\n # Default None values\n location_data = {\"city\": None,\n \"road\": None,\n \"house_number\": None,\n \"postcode\": None,\n \"country\": None,\n \"county\": None,\n \"state\": None,\n \"ISO_3166-1_alpha-2\": None,\n \"country_code\": None,\n \"lat\": None,\n \"lng\": None}\n\n # Reverse geocoding ... from coordinates to address\n if format == \"reverse\":\n # If the query (coordinates) is not empty\n if query is None or len(query) < 3:\n pass\n else:\n location = geolocator.reverse(query)\n if location is not None:\n location_data = location[0].raw[u'components']\n location_data[\"lat\"] = location[0].raw[u'geometry'][\"lat\"]\n location_data[\"lng\"] = location[0].raw[u'geometry'][\"lng\"]\n # Direct geocoding ... 
from address to coordinates and full address\n if format == \"direct\":\n # If the query (address) is not empty\n if query is None or len(query) < 3:\n pass\n else:\n location = geolocator.geocode(query)\n if location is not None:\n location_data = location.raw[u'components']\n location_data[\"lat\"] = location.raw[u'geometry'][\"lat\"]\n location_data[\"lng\"] = location.raw[u'geometry'][\"lng\"]\n\n # Extract the meaningful data\n for component in location_data:\n if component == \"town\" or component == \"city\":\n data[\"city\"] = location_data[component]\n if component == \"road\":\n road = location_data[component]\n if component == \"house_number\":\n number = location_data[component]\n if component == \"postcode\":\n data[\"postal_code\"] = location_data[component]\n if component == \"country\":\n data[\"country\"] = location_data[component]\n if component == \"county\":\n data[\"county\"] = location_data[component]\n if component == \"state\":\n data[\"state\"] = location_data[component]\n if component == \"ISO_3166-1_alpha-2\":\n data[\"country_code\"] = location_data[component]\n # The address need to be reconstructed\n data[\"address_1\"] = unicode(road) + \" \" + unicode(number)\n data[\"latitude\"] = location_data[\"lat\"]\n data[\"longitude\"] = location_data[\"lng\"]\n # Format the country code to three letters\n try:\n country_data = transformations.cca2_to_ccn(data[\"country_code\"])\n data[\"country_code\"] = transformations.ccn_to_cca3(country_data)\n except:\n data[\"country_code\"] = None\n # Get the continent\n try:\n country_data = transformations.cc_to_cn(data[\"country_code\"])\n data[\"continent\"] = transformations.cn_to_ctn(country_data)\n except:\n data[\"continent\"] = None\n\n # Return the final data\n return data\n",
"def data_from_diybio_org():\n \"\"\"Scrapes data from diybio.org.\"\"\"\n\n r = requests.get(diy_bio_labs_url)\n\n if r.status_code == 200:\n # Fix a problem in the html source while loading it\n data = BeautifulSoup(r.text.replace(u'\\xa0', u''), \"lxml\")\n else:\n data = \"There was an error while accessing data on diybio.org.\"\n\n return data\n"
] |
# -*- encoding: utf-8 -*-
#
# Access data from diybio.org
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
from classes import Lab
from utils import get_location
import json
from bs4 import BeautifulSoup
import requests
from geojson import dumps, Feature, Point, FeatureCollection
import us
import pandas as pd
# Endpoints
diy_bio_labs_url = "https://diybio.org/local/"
class DiyBioLab(Lab):
"""Represents a DIYBio Lab as it is described on diybio.org."""
def __init__(self):
self.source = "diybio.org"
self.lab_type = "DIYBio Lab"
def data_from_diybio_org():
"""Scrapes data from diybio.org."""
r = requests.get(diy_bio_labs_url)
if r.status_code == 200:
# Fix a problem in the html source while loading it
data = BeautifulSoup(r.text.replace(u'\xa0', u''), "lxml")
else:
data = "There was an error while accessing data on diybio.org."
return data
def labs_count():
"""Gets the number of current DIYBio Labs listed on diybio.org."""
diybiolabs = get_labs("object")
return len(diybiolabs)
if __name__ == "__main__":
pass
|
openp2pdesign/makerlabs
|
makerlabs/techshop_ws.py
|
data_from_techshop_ws
|
python
|
def data_from_techshop_ws(tws_url):
r = requests.get(tws_url)
if r.status_code == 200:
data = BeautifulSoup(r.text, "lxml")
else:
data = "There was an error while accessing data on techshop.ws."
return data
|
Scrapes data from techshop.ws.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/techshop_ws.py#L38-L47
| null |
# -*- encoding: utf-8 -*-
#
# Access data from techshop.ws
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
from classes import Lab
import json
from bs4 import BeautifulSoup
import requests
from geojson import dumps, Feature, Point, FeatureCollection
from geopy.geocoders import Nominatim
import pandas as pd
# Geocoding variable
geolocator = Nominatim()
# Endpoints
techshop_us_url = "http://techshop.ws/locations.html"
techshop_global_url = "http://techshop.ws/ts_global.html"
class Techshop(Lab):
"""Represents a Techshop as it is described on techshop.ws."""
def __init__(self):
self.source = "techshop.ws"
self.lab_type = "Techshop"
def get_labs(format):
"""Gets Techshop data from techshop.ws."""
techshops_soup = data_from_techshop_ws(techshop_us_url)
techshops = {}
# Load all the TechShops
# By first parsing the html
data = techshops_soup.findAll('div', attrs={'id': 'main-content'})
for element in data:
links = element.findAll('a')
hrefs = {}
for k, a in enumerate(links):
if "contact" not in a['href']:
hrefs[k] = a['href']
for k, v in hrefs.iteritems():
if "http://techshop.ws/" not in v:
hrefs[k] = "http://techshop.ws/" + v
else:
hrefs[k] = v
for k, v in hrefs.iteritems():
if "http://techshop.com/" in v:
hrefs[k] = v.replace("http://techshop.com/", "")
# Remove duplicate pages
hr = []
for key, value in hrefs.iteritems():
if value not in hr:
hr.append(value)
hrefs = hr
# Check all pages
for page in hrefs:
data = data_from_techshop_ws(page)
current_lab = Techshop()
name = data.title.contents[0].split('-- ')[1].encode('utf-8')
if "TechShop" not in name:
name = "TechShop " + name
current_lab.name = name
current_lab.slug = name
current_lab.url = page
# Find Facebook and Twitter links
current_lab.links = {"facebook": "", "twitter": ""}
page_links = data.findAll('a')
for link in page_links:
if link.has_attr("href"):
if "facebook" in link.attrs["href"]:
current_lab.links["facebook"] = link.attrs["href"]
if "twitter" in link.attrs["href"]:
current_lab.links["twitter"] = link.attrs["href"]
# Find the coordinates by analysing the embedded google map
iframes = data.findAll('iframe')
if len(iframes) != 0:
for iframe in iframes:
embed_url = iframe.attrs["src"]
if "google" in embed_url:
two_d = embed_url.find("2d")
three_d = embed_url.find("3d")
longitude = embed_url[two_d:].split('!')[0]
latitude = embed_url[three_d:].split('!')[0]
longitude = longitude[2:]
latitude = latitude[2:]
# ... or the link to google map
else:
page_links = data.findAll('a')
for link in page_links:
# one case...
if "maps.google.com/" in link.attrs["href"]:
embed_url = link.attrs["href"]
if "ll=" in embed_url:
first_string = embed_url.split('&sspn')[0]
coordinates = first_string.split('ll=')[1]
latitude = coordinates.split(',')[0]
longitude = coordinates.split(',')[1]
# ... another case
elif "www.google.com/maps" in link.attrs["href"]:
embed_url = link.attrs["href"]
if "1d" in embed_url:
one_d = embed_url.find("1d")
two_d = embed_url.find("2d")
longitude = embed_url[one_d:].split('!')[0]
latitude = embed_url[two_d:].split('!')[0]
longitude = longitude[2:]
latitude = latitude[2:]
current_lab.latitude = latitude
current_lab.longitude = longitude
current_lab.continent = "North America"
current_lab.country_code = "USA"
current_lab.country = "United States of America"
location = geolocator.reverse((latitude, longitude))
if "city" in location.raw["address"]:
current_lab.county = location.raw["address"]["city"].encode(
'utf-8')
if "county" in location.raw["address"]:
current_lab.county = location.raw["address"]["county"].encode(
'utf-8')
if "state" in location.raw["address"]:
current_lab.state = location.raw["address"]["state"].encode(
'utf-8')
if "postcode" in location.raw["address"]:
current_lab.postal_code = location.raw["address"][
"postcode"].encode('utf-8')
current_lab.address_1 = location.address.encode('utf-8')
# Add the lab to the list
techshops[current_lab.slug] = current_lab
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in techshops:
output[j] = techshops[j].__dict__
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in techshops:
single = techshops[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in techshops:
output[j] = techshops[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = techshops
# Default: return an oject
else:
output = techshops
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output
def labs_count():
"""Gets the number of current Techshops listed on techshops.ws."""
techshops = get_labs("object")
return len(techshops)
if __name__ == "__main__":
pass
|
openp2pdesign/makerlabs
|
makerlabs/techshop_ws.py
|
get_labs
|
python
|
def get_labs(format):
techshops_soup = data_from_techshop_ws(techshop_us_url)
techshops = {}
# Load all the TechShops
# By first parsing the html
data = techshops_soup.findAll('div', attrs={'id': 'main-content'})
for element in data:
links = element.findAll('a')
hrefs = {}
for k, a in enumerate(links):
if "contact" not in a['href']:
hrefs[k] = a['href']
for k, v in hrefs.iteritems():
if "http://techshop.ws/" not in v:
hrefs[k] = "http://techshop.ws/" + v
else:
hrefs[k] = v
for k, v in hrefs.iteritems():
if "http://techshop.com/" in v:
hrefs[k] = v.replace("http://techshop.com/", "")
# Remove duplicate pages
hr = []
for key, value in hrefs.iteritems():
if value not in hr:
hr.append(value)
hrefs = hr
# Check all pages
for page in hrefs:
data = data_from_techshop_ws(page)
current_lab = Techshop()
name = data.title.contents[0].split('-- ')[1].encode('utf-8')
if "TechShop" not in name:
name = "TechShop " + name
current_lab.name = name
current_lab.slug = name
current_lab.url = page
# Find Facebook and Twitter links
current_lab.links = {"facebook": "", "twitter": ""}
page_links = data.findAll('a')
for link in page_links:
if link.has_attr("href"):
if "facebook" in link.attrs["href"]:
current_lab.links["facebook"] = link.attrs["href"]
if "twitter" in link.attrs["href"]:
current_lab.links["twitter"] = link.attrs["href"]
# Find the coordinates by analysing the embedded google map
iframes = data.findAll('iframe')
if len(iframes) != 0:
for iframe in iframes:
embed_url = iframe.attrs["src"]
if "google" in embed_url:
two_d = embed_url.find("2d")
three_d = embed_url.find("3d")
longitude = embed_url[two_d:].split('!')[0]
latitude = embed_url[three_d:].split('!')[0]
longitude = longitude[2:]
latitude = latitude[2:]
# ... or the link to google map
else:
page_links = data.findAll('a')
for link in page_links:
# one case...
if "maps.google.com/" in link.attrs["href"]:
embed_url = link.attrs["href"]
if "ll=" in embed_url:
first_string = embed_url.split('&sspn')[0]
coordinates = first_string.split('ll=')[1]
latitude = coordinates.split(',')[0]
longitude = coordinates.split(',')[1]
# ... another case
elif "www.google.com/maps" in link.attrs["href"]:
embed_url = link.attrs["href"]
if "1d" in embed_url:
one_d = embed_url.find("1d")
two_d = embed_url.find("2d")
longitude = embed_url[one_d:].split('!')[0]
latitude = embed_url[two_d:].split('!')[0]
longitude = longitude[2:]
latitude = latitude[2:]
current_lab.latitude = latitude
current_lab.longitude = longitude
current_lab.continent = "North America"
current_lab.country_code = "USA"
current_lab.country = "United States of America"
location = geolocator.reverse((latitude, longitude))
if "city" in location.raw["address"]:
current_lab.county = location.raw["address"]["city"].encode(
'utf-8')
if "county" in location.raw["address"]:
current_lab.county = location.raw["address"]["county"].encode(
'utf-8')
if "state" in location.raw["address"]:
current_lab.state = location.raw["address"]["state"].encode(
'utf-8')
if "postcode" in location.raw["address"]:
current_lab.postal_code = location.raw["address"][
"postcode"].encode('utf-8')
current_lab.address_1 = location.address.encode('utf-8')
# Add the lab to the list
techshops[current_lab.slug] = current_lab
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in techshops:
output[j] = techshops[j].__dict__
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in techshops:
single = techshops[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in techshops:
output[j] = techshops[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = techshops
# Default: return an oject
else:
output = techshops
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output
|
Gets Techshop data from techshop.ws.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/techshop_ws.py#L50-L191
|
[
"def data_from_techshop_ws(tws_url):\n \"\"\"Scrapes data from techshop.ws.\"\"\"\n\n r = requests.get(tws_url)\n if r.status_code == 200:\n data = BeautifulSoup(r.text, \"lxml\")\n else:\n data = \"There was an error while accessing data on techshop.ws.\"\n\n return data\n"
] |
# -*- encoding: utf-8 -*-
#
# Access data from techshop.ws
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
from classes import Lab
import json
from bs4 import BeautifulSoup
import requests
from geojson import dumps, Feature, Point, FeatureCollection
from geopy.geocoders import Nominatim
import pandas as pd
# Geocoding variable
geolocator = Nominatim()
# Endpoints
techshop_us_url = "http://techshop.ws/locations.html"
techshop_global_url = "http://techshop.ws/ts_global.html"
class Techshop(Lab):
"""Represents a Techshop as it is described on techshop.ws."""
def __init__(self):
self.source = "techshop.ws"
self.lab_type = "Techshop"
def data_from_techshop_ws(tws_url):
"""Scrapes data from techshop.ws."""
r = requests.get(tws_url)
if r.status_code == 200:
data = BeautifulSoup(r.text, "lxml")
else:
data = "There was an error while accessing data on techshop.ws."
return data
def labs_count():
"""Gets the number of current Techshops listed on techshops.ws."""
techshops = get_labs("object")
return len(techshops)
if __name__ == "__main__":
pass
|
openp2pdesign/makerlabs
|
makerlabs/fablabs_io.py
|
get_labs
|
python
|
def get_labs(format):
fablabs_json = data_from_fablabs_io(fablabs_io_labs_api_url_v0)
fablabs = {}
# Load all the FabLabs
for i in fablabs_json["labs"]:
current_lab = FabLab()
current_lab.name = i["name"]
current_lab.address_1 = i["address_1"]
current_lab.address_2 = i["address_2"]
current_lab.address_notes = i["address_notes"]
current_lab.avatar = i["avatar_url"]
current_lab.blurb = i["blurb"]
current_lab.capabilities = i["capabilities"]
if i["city"].isupper():
i["city"] = i["city"].title()
current_lab.city = i["city"]
current_lab.country_code = i["country_code"]
current_lab.county = i["county"]
current_lab.description = i["description"]
current_lab.email = i["email"]
current_lab.id = i["id"]
current_lab.phone = i["phone"]
current_lab.postal_code = i["postal_code"]
current_lab.slug = i["slug"]
current_lab.url = i["url"]
current_lab.continent = country_alpha2_to_continent_code(i["country_code"].upper())
current_country = pycountry.countries.get(alpha_2=i["country_code"].upper())
current_lab.country_code = current_country.alpha_3
current_lab.country = current_country.name
# Check coordinates
if i["longitude"] is not None:
current_lab.longitude = i["longitude"]
else:
current_lab.longitude = 0.0
if i["latitude"] is not None:
current_lab.latitude = i["latitude"]
else:
current_lab.latitude = 0.0
# Find Facebook and Twitter links, add also the other ones
current_lab.links = {"facebook": "", "twitter": ""}
for link in i["links"]:
if "facebook" in link["url"]:
current_lab.links["facebook"] = link["url"]
elif "twitter" in link["url"]:
current_lab.links["twitter"] = link["url"]
else:
current_lab.links[link["id"]] = link["url"]
# Add the lab to the list
fablabs[i["slug"]] = current_lab
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in fablabs:
output[j] = fablabs[j].__dict__
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in fablabs:
single = fablabs[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in fablabs:
output[j] = fablabs[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = fablabs
# Default: return an oject
else:
output = fablabs
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output
|
Gets Fab Lab data from fablabs.io.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/fablabs_io.py#L82-L172
|
[
"def data_from_fablabs_io(endpoint):\n \"\"\"Gets data from fablabs.io.\"\"\"\n\n data = requests.get(endpoint).json()\n\n return data\n"
] |
# -*- encoding: utf-8 -*-
#
# Access data from fablabs.io
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
from classes import Lab
import json
import requests
from geojson import dumps, Feature, Point, FeatureCollection
from geopy.geocoders import Nominatim
import pycountry
from pycountry_convert import country_alpha2_to_continent_code
from time import sleep
import pandas as pd
# Geocoding variable
geolocator = Nominatim()
# Endpoints
fablabs_io_labs_api_url_v0 = "https://api.fablabs.io/v0/labs.json"
fablabs_io_projects_api_url_v0 = "https://api.fablabs.io/v0/projects.json"
class FabLab(Lab):
"""Represents a Fab Lab as it is described on fablabs.io."""
def __init__(self):
self.source = "fablabs.io"
self.lab_type = "Fab Lab"
class Project(object):
"""Represents a project as it is described on fablabs.io."""
def __init__(self):
self.id = ""
self.title = ""
self.description = ""
self.github = ""
self.web = ""
self.dropbox = ""
self.bitbucket = ""
self.lab_id = ""
self.lab = ""
self.owner_id = ""
self.created_at = ""
self.updated_at = ""
self.vimeo = ""
self.flickr = ""
self.youtube = ""
self.drive = ""
self.twitter = ""
self.facebook = ""
self.googleplus = ""
self.instagram = ""
self.status = ""
self.version = ""
self.faq = ""
self.scope = ""
self.community = ""
self.lookingfor = ""
self.cover = ""
self.type = "Project in a Fab Lab"
def data_from_fablabs_io(endpoint):
"""Gets data from fablabs.io."""
data = requests.get(endpoint).json()
return data
def labs_count():
"""Gets the number of current Fab Labs registered on fablabs.io."""
fablabs = data_from_fablabs_io(fablabs_io_labs_api_url_v0)
return len(fablabs["labs"])
def get_projects(format):
"""Gets projects data from fablabs.io."""
projects_json = data_from_fablabs_io(fablabs_io_projects_api_url_v0)
projects = {}
project_url = "https://www.fablabs.io/projects/"
fablabs = get_labs(format="object")
# Load all the FabLabs
for i in projects_json["projects"]:
i = i["projects"]
current_project = Project()
current_project.id = i["id"]
current_project.title = i["title"]
current_project.description = i["description"]
current_project.github = i["github"]
current_project.web = i["web"]
current_project.dropbox = i["dropbox"]
current_project.bitbucket = i["bitbucket"]
current_project.lab_id = i["lab_id"]
# Add the lab of the project
if i["lab_id"] is not None:
for k in fablabs:
if fablabs[k].id == i["lab_id"]:
current_project.lab = fablabs[k]
else:
current_project.lab = None
current_project.owner_id = i["owner_id"]
current_project.created_at = i["created_at"]
current_project.updated_at = i["updated_at"]
current_project.vimeo = i["vimeo"]
current_project.flickr = i["flickr"]
current_project.youtube = i["youtube"]
current_project.drive = i["drive"]
current_project.twitter = i["twitter"]
current_project.facebook = i["facebook"]
current_project.googleplus = i["googleplus"]
current_project.instagram = i["instagram"]
current_project.status = i["status"]
current_project.version = i["version"]
current_project.faq = i["faq"]
current_project.scope = i["scope"]
current_project.community = i["community"]
current_project.lookingfor = i["lookingfor"]
current_project.cover = i["cover"]
url = project_url + str(current_project.id)
current_project.url = url
# Add the project
projects[current_project.id] = current_project
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in projects:
project_dict = projects[j].__dict__
# Convert the lab from a Fab Lab object to a dict
if project_dict["lab"] is not None:
project_dict["lab"] = project_dict["lab"].__dict__
output[j] = project_dict
# Return a geojson, only for projects linked to a lab
elif format.lower() == "geojson" or format.lower() == "geo":
projects_list = []
for p in projects:
if projects[p].lab_id is not None:
single_project = projects[p].__dict__
if projects[p].lab is not None:
single_project["lab"] = single_project["lab"].__dict__
for l in fablabs:
single_lab = fablabs[l].__dict__
if single_lab["id"] == single_project["lab_id"]:
project_lab = Feature(
type="Feature",
geometry=Point((single_lab["latitude"],
single_lab["longitude"])),
properties=single_project)
projects_list.append(project_lab)
output = dumps(FeatureCollection(projects_list))
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = projects
# Default: return an object
else:
output = projects
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output
def projects_count():
"""Gets the number of current projects submitted on fablabs.io."""
projects = data_from_fablabs_io(fablabs_io_projects_api_url_v0)
return len(projects["projects"])
if __name__ == "__main__":
print get_labs(format="json")
|
openp2pdesign/makerlabs
|
makerlabs/fablabs_io.py
|
get_projects
|
python
|
def get_projects(format):
projects_json = data_from_fablabs_io(fablabs_io_projects_api_url_v0)
projects = {}
project_url = "https://www.fablabs.io/projects/"
fablabs = get_labs(format="object")
# Load all the FabLabs
for i in projects_json["projects"]:
i = i["projects"]
current_project = Project()
current_project.id = i["id"]
current_project.title = i["title"]
current_project.description = i["description"]
current_project.github = i["github"]
current_project.web = i["web"]
current_project.dropbox = i["dropbox"]
current_project.bitbucket = i["bitbucket"]
current_project.lab_id = i["lab_id"]
# Add the lab of the project
if i["lab_id"] is not None:
for k in fablabs:
if fablabs[k].id == i["lab_id"]:
current_project.lab = fablabs[k]
else:
current_project.lab = None
current_project.owner_id = i["owner_id"]
current_project.created_at = i["created_at"]
current_project.updated_at = i["updated_at"]
current_project.vimeo = i["vimeo"]
current_project.flickr = i["flickr"]
current_project.youtube = i["youtube"]
current_project.drive = i["drive"]
current_project.twitter = i["twitter"]
current_project.facebook = i["facebook"]
current_project.googleplus = i["googleplus"]
current_project.instagram = i["instagram"]
current_project.status = i["status"]
current_project.version = i["version"]
current_project.faq = i["faq"]
current_project.scope = i["scope"]
current_project.community = i["community"]
current_project.lookingfor = i["lookingfor"]
current_project.cover = i["cover"]
url = project_url + str(current_project.id)
current_project.url = url
# Add the project
projects[current_project.id] = current_project
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in projects:
project_dict = projects[j].__dict__
# Convert the lab from a Fab Lab object to a dict
if project_dict["lab"] is not None:
project_dict["lab"] = project_dict["lab"].__dict__
output[j] = project_dict
# Return a geojson, only for projects linked to a lab
elif format.lower() == "geojson" or format.lower() == "geo":
projects_list = []
for p in projects:
if projects[p].lab_id is not None:
single_project = projects[p].__dict__
if projects[p].lab is not None:
single_project["lab"] = single_project["lab"].__dict__
for l in fablabs:
single_lab = fablabs[l].__dict__
if single_lab["id"] == single_project["lab_id"]:
project_lab = Feature(
type="Feature",
geometry=Point((single_lab["latitude"],
single_lab["longitude"])),
properties=single_project)
projects_list.append(project_lab)
output = dumps(FeatureCollection(projects_list))
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = projects
# Default: return an object
else:
output = projects
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output
|
Gets projects data from fablabs.io.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/fablabs_io.py#L183-L269
|
[
"def get_labs(format):\n \"\"\"Gets Fab Lab data from fablabs.io.\"\"\"\n\n fablabs_json = data_from_fablabs_io(fablabs_io_labs_api_url_v0)\n fablabs = {}\n\n # Load all the FabLabs\n for i in fablabs_json[\"labs\"]:\n current_lab = FabLab()\n current_lab.name = i[\"name\"]\n current_lab.address_1 = i[\"address_1\"]\n current_lab.address_2 = i[\"address_2\"]\n current_lab.address_notes = i[\"address_notes\"]\n current_lab.avatar = i[\"avatar_url\"]\n current_lab.blurb = i[\"blurb\"]\n current_lab.capabilities = i[\"capabilities\"]\n if i[\"city\"].isupper():\n i[\"city\"] = i[\"city\"].title()\n current_lab.city = i[\"city\"]\n current_lab.country_code = i[\"country_code\"]\n current_lab.county = i[\"county\"]\n current_lab.description = i[\"description\"]\n current_lab.email = i[\"email\"]\n current_lab.id = i[\"id\"]\n current_lab.phone = i[\"phone\"]\n current_lab.postal_code = i[\"postal_code\"]\n current_lab.slug = i[\"slug\"]\n current_lab.url = i[\"url\"]\n\n current_lab.continent = country_alpha2_to_continent_code(i[\"country_code\"].upper())\n current_country = pycountry.countries.get(alpha_2=i[\"country_code\"].upper())\n current_lab.country_code = current_country.alpha_3\n current_lab.country = current_country.name\n\n # Check coordinates\n if i[\"longitude\"] is not None:\n current_lab.longitude = i[\"longitude\"]\n else:\n current_lab.longitude = 0.0\n if i[\"latitude\"] is not None:\n current_lab.latitude = i[\"latitude\"]\n else:\n current_lab.latitude = 0.0\n\n # Find Facebook and Twitter links, add also the other ones\n current_lab.links = {\"facebook\": \"\", \"twitter\": \"\"}\n for link in i[\"links\"]:\n if \"facebook\" in link[\"url\"]:\n current_lab.links[\"facebook\"] = link[\"url\"]\n elif \"twitter\" in link[\"url\"]:\n current_lab.links[\"twitter\"] = link[\"url\"]\n else:\n current_lab.links[link[\"id\"]] = link[\"url\"]\n\n # Add the lab to the list\n fablabs[i[\"slug\"]] = current_lab\n\n # Return a dictiornary / json\n if 
format.lower() == \"dict\" or format.lower() == \"json\":\n output = {}\n for j in fablabs:\n output[j] = fablabs[j].__dict__\n # Return a geojson\n elif format.lower() == \"geojson\" or format.lower() == \"geo\":\n labs_list = []\n for l in fablabs:\n single = fablabs[l].__dict__\n single_lab = Feature(\n type=\"Feature\",\n geometry=Point((single[\"latitude\"], single[\"longitude\"])),\n properties=single)\n labs_list.append(single_lab)\n output = dumps(FeatureCollection(labs_list))\n # Return a Pandas DataFrame\n elif format.lower() == \"pandas\" or format.lower() == \"dataframe\":\n output = {}\n for j in fablabs:\n output[j] = fablabs[j].__dict__\n # Transform the dict into a Pandas DataFrame\n output = pd.DataFrame.from_dict(output)\n output = output.transpose()\n # Return an object\n elif format.lower() == \"object\" or format.lower() == \"obj\":\n output = fablabs\n # Default: return an oject\n else:\n output = fablabs\n # Return a proper json\n if format.lower() == \"json\":\n output = json.dumps(output)\n return output\n",
"def data_from_fablabs_io(endpoint):\n \"\"\"Gets data from fablabs.io.\"\"\"\n\n data = requests.get(endpoint).json()\n\n return data\n"
] |
# -*- encoding: utf-8 -*-
#
# Access data from fablabs.io
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
from classes import Lab
import json
import requests
from geojson import dumps, Feature, Point, FeatureCollection
from geopy.geocoders import Nominatim
import pycountry
from pycountry_convert import country_alpha2_to_continent_code
from time import sleep
import pandas as pd
# Geocoding variable
geolocator = Nominatim()
# Endpoints
fablabs_io_labs_api_url_v0 = "https://api.fablabs.io/v0/labs.json"
fablabs_io_projects_api_url_v0 = "https://api.fablabs.io/v0/projects.json"
class FabLab(Lab):
"""Represents a Fab Lab as it is described on fablabs.io."""
def __init__(self):
self.source = "fablabs.io"
self.lab_type = "Fab Lab"
class Project(object):
"""Represents a project as it is described on fablabs.io."""
def __init__(self):
self.id = ""
self.title = ""
self.description = ""
self.github = ""
self.web = ""
self.dropbox = ""
self.bitbucket = ""
self.lab_id = ""
self.lab = ""
self.owner_id = ""
self.created_at = ""
self.updated_at = ""
self.vimeo = ""
self.flickr = ""
self.youtube = ""
self.drive = ""
self.twitter = ""
self.facebook = ""
self.googleplus = ""
self.instagram = ""
self.status = ""
self.version = ""
self.faq = ""
self.scope = ""
self.community = ""
self.lookingfor = ""
self.cover = ""
self.type = "Project in a Fab Lab"
def data_from_fablabs_io(endpoint):
"""Gets data from fablabs.io."""
data = requests.get(endpoint).json()
return data
def get_labs(format):
"""Gets Fab Lab data from fablabs.io."""
fablabs_json = data_from_fablabs_io(fablabs_io_labs_api_url_v0)
fablabs = {}
# Load all the FabLabs
for i in fablabs_json["labs"]:
current_lab = FabLab()
current_lab.name = i["name"]
current_lab.address_1 = i["address_1"]
current_lab.address_2 = i["address_2"]
current_lab.address_notes = i["address_notes"]
current_lab.avatar = i["avatar_url"]
current_lab.blurb = i["blurb"]
current_lab.capabilities = i["capabilities"]
if i["city"].isupper():
i["city"] = i["city"].title()
current_lab.city = i["city"]
current_lab.country_code = i["country_code"]
current_lab.county = i["county"]
current_lab.description = i["description"]
current_lab.email = i["email"]
current_lab.id = i["id"]
current_lab.phone = i["phone"]
current_lab.postal_code = i["postal_code"]
current_lab.slug = i["slug"]
current_lab.url = i["url"]
current_lab.continent = country_alpha2_to_continent_code(i["country_code"].upper())
current_country = pycountry.countries.get(alpha_2=i["country_code"].upper())
current_lab.country_code = current_country.alpha_3
current_lab.country = current_country.name
# Check coordinates
if i["longitude"] is not None:
current_lab.longitude = i["longitude"]
else:
current_lab.longitude = 0.0
if i["latitude"] is not None:
current_lab.latitude = i["latitude"]
else:
current_lab.latitude = 0.0
# Find Facebook and Twitter links, add also the other ones
current_lab.links = {"facebook": "", "twitter": ""}
for link in i["links"]:
if "facebook" in link["url"]:
current_lab.links["facebook"] = link["url"]
elif "twitter" in link["url"]:
current_lab.links["twitter"] = link["url"]
else:
current_lab.links[link["id"]] = link["url"]
# Add the lab to the list
fablabs[i["slug"]] = current_lab
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in fablabs:
output[j] = fablabs[j].__dict__
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in fablabs:
single = fablabs[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in fablabs:
output[j] = fablabs[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = fablabs
# Default: return an oject
else:
output = fablabs
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output
def labs_count():
    """Return how many Fab Labs are currently registered on fablabs.io."""
    data = data_from_fablabs_io(fablabs_io_labs_api_url_v0)
    return len(data["labs"])
def projects_count():
    """Return how many projects are currently submitted on fablabs.io."""
    data = data_from_fablabs_io(fablabs_io_projects_api_url_v0)
    return len(data["projects"])
if __name__ == "__main__":
    # Demo entry point: dump every lab as a JSON string.
    # NOTE(review): Python 2 print statement — this file is Python 2 only.
    print get_labs(format="json")
|
openp2pdesign/makerlabs
|
makerlabs/hackerspaces_org.py
|
get_single_lab
|
python
|
def get_single_lab(lab_slug, open_cage_api_key):
    """Gets data from a single lab from hackerspaces.org.

    :param lab_slug: title of the lab's wiki page (spaces replaced by "_")
    :param open_cage_api_key: OpenCage geocoder API key, used to
        reverse-geocode the lab's coordinates into a full address
    :return: a populated Hackerspace instance
    """
    wiki = MediaWiki(hackerspaces_org_api_url)
    # Fetch the latest revision's raw wikitext for the requested page.
    wiki_response = wiki.call(
        {'action': 'query',
         'titles': lab_slug,
         'prop': 'revisions',
         'rvprop': 'content'})
    # If we don't know the pageid, iterate the "pages" mapping; with a
    # single title this binds the (only) page's revision content.
    for i in wiki_response["query"]["pages"]:
        content = wiki_response["query"]["pages"][i]["revisions"][0]["*"]
    # Transform the data into a Lab object
    current_lab = Hackerspace()
    equipment_list = []
    # Parse the Mediawiki code
    wikicode = mwparserfromhell.parse(content)
    for k in wikicode.filter_templates():
        element_name = unicode(k.name)
        if "Hackerspace" in element_name:
            # {{Hackerspace}} infobox: copy each template parameter onto
            # the matching attribute of the lab object.
            for j in k.params:
                current_lab.name = lab_slug
                j_value = unicode(j.value)
                j_name = unicode(j.name)
                # Remove new line in content
                if j_value[-1:] == "\n" or j_value[:1] == "\n":
                    j_value = j_value.replace('\n', '')
                if j_name == "logo":
                    current_lab.logo = j_value
                if j_name == "founding":
                    current_lab.founding = j_value
                if j_name == "coordinate":
                    # Clean the coordinates: strip quotes, the N/S/W/E
                    # cardinal letters, the degree sign and spaces,
                    # leaving a bare "lat,lon" string for the geocoder.
                    j_value = j_value.replace('"', '')
                    j_value = j_value.replace('N', '')
                    j_value = j_value.replace('S', '')
                    j_value = j_value.replace('W', '')
                    j_value = j_value.replace('E', '')
                    j_value = j_value.replace(u'°', '')
                    j_value = j_value.replace(' ', '')
                    # Get the full address with the coordinates
                    address = get_location(query=j_value, format="reverse", api_key=open_cage_api_key)
                    current_lab.city = address["city"]
                    current_lab.county = address["county"]
                    current_lab.state = address["state"]
                    current_lab.postal_code = address["postal_code"]
                    current_lab.address_1 = address["address_1"]
                    current_lab.country = address["country"]
                    current_lab.country_code = address["country_code"]
                    current_lab.continent = address["continent"]
                    current_lab.latitude = address["latitude"]
                    current_lab.longitude = address["longitude"]
                if j_name == "membercount":
                    current_lab.membercount = j_value
                if j_name == "fee":
                    current_lab.fee = j_value
                if j_name == "size":
                    current_lab.size = j_value
                if j_name == "status":
                    current_lab.status = j_value
                if j_name == "site":
                    current_lab.site = j_value
                if j_name == "wiki":
                    current_lab.wiki = j_value
                if j_name == "irc":
                    current_lab.irc = j_value
                if j_name == "jabber":
                    current_lab.jabber = j_value
                if j_name == "phone":
                    current_lab.phone = j_value
                if j_name == "youtube":
                    current_lab.youtube = j_value
                if j_name == "eventbrite":
                    current_lab.eventbrite = j_value
                if j_name == "facebook":
                    current_lab.facebook = j_value
                if j_name == "ustream":
                    current_lab.ustream = j_value
                if j_name == "flickr":
                    current_lab.flickr = j_value
                if j_name == "twitter":
                    current_lab.twitter = j_value
                if j_name == "googleplus":
                    current_lab.googleplus = j_value
                if j_name == "email":
                    current_lab.email = j_value
                if j_name == "maillist":
                    current_lab.maillist = j_value
                if j_name == "ical":
                    current_lab.ical = j_value
                if j_name == "forum":
                    current_lab.forum = j_value
        elif "Equipment" in element_name:
            # {{Equipment}} template: collect the equipment entries.
            for j in k.params:
                equipment_list.append(j.replace("equipment=", ""))
    current_lab.equipment = equipment_list
    # Load the free text: template nodes expose a .name attribute while
    # plain text nodes do not, so everything raising AttributeError here
    # is free text and gets concatenated.
    freetext = ""
    for k in wikicode._nodes:
        try:
            test_value = k.name
        except AttributeError:
            freetext += unicode(k)
    current_lab.text = freetext
    return current_lab
|
Gets data from a single lab from hackerspaces.org.
|
train
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/hackerspaces_org.py#L32-L142
| null |
# -*- encoding: utf-8 -*-
#
# Access data from hackerspaces.org
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
from classes import Lab
from utils import get_location
import json
from simplemediawiki import MediaWiki
import mwparserfromhell
import pandas as pd
hackerspaces_org_api_url = "https://wiki.hackerspaces.org/w/api.php"
class Hackerspace(Lab):
    """Represents a Hackerspace as it is described on hackerspaces.org."""

    def __init__(self):
        # Tag each instance with its data source and lab type so records
        # from different sources can be told apart downstream.
        # NOTE(review): Lab.__init__ is not called here — confirm the base
        # class needs no initialisation.
        self.source = "hackerspaces.org"
        self.lab_type = "Hackerspace"
def get_labs(format, open_cage_api_key):
    """Gets data from all labs from hackerspaces.org.

    :param format: output format: "dict"/"json", "geojson"/"geo",
        "pandas"/"dataframe", or "object"/"obj" (anything else also
        returns the object list)
    :param open_cage_api_key: OpenCage geocoder API key, forwarded to
        get_single_lab() for reverse-geocoding lab coordinates
    :return: the labs in the requested format
    """
    labs = []
    # Get the first page of data
    wiki = MediaWiki(hackerspaces_org_api_url)
    wiki_response = wiki.call(
        {'action': 'query',
         'list': 'categorymembers',
         'cmtitle': 'Category:Hackerspace',
         'cmlimit': '500'})
    nextpage = wiki_response["query-continue"]["categorymembers"]["cmcontinue"]
    urls = []
    for i in wiki_response["query"]["categorymembers"]:
        urls.append(i["title"].replace(" ", "_"))
    # Load all the Labs in the first page
    for i in urls:
        current_lab = get_single_lab(i, open_cage_api_key)
        labs.append(current_lab)
    # Load all the Labs from the other pages, following the API's
    # "query-continue" pagination cursor until it disappears.
    while "query-continue" in wiki_response:
        wiki = MediaWiki(hackerspaces_org_api_url)
        wiki_response = wiki.call({'action': 'query',
                                   'list': 'categorymembers',
                                   'cmtitle': 'Category:Hackerspace',
                                   'cmlimit': '500',
                                   "cmcontinue": nextpage})
        urls = []
        for i in wiki_response["query"]["categorymembers"]:
            urls.append(i["title"].replace(" ", "_"))
        # Load all the Labs
        for i in urls:
            current_lab = get_single_lab(i, open_cage_api_key)
            labs.append(current_lab)
        if "query-continue" in wiki_response:
            nextpage = wiki_response[
                "query-continue"]["categorymembers"]["cmcontinue"]
        else:
            break
    # Transform the list into a dictionary of plain attribute dicts
    labs_dict = {}
    for j, k in enumerate(labs):
        labs_dict[j] = k.__dict__
    # Return a dictionary / json
    if format.lower() == "dict" or format.lower() == "json":
        output = labs_dict
    # Return a geojson
    elif format.lower() == "geojson" or format.lower() == "geo":
        labs_list = []
        for l in labs_dict:
            # BUGFIX: labs_dict values are already plain dicts (built
            # from k.__dict__ above), so the previous
            # `labs_dict[l].__dict__` raised AttributeError.
            single = labs_dict[l]
            single_lab = Feature(
                type="Feature",
                geometry=Point((single["latitude"], single["longitude"])),
                properties=single)
            labs_list.append(single_lab)
        output = dumps(FeatureCollection(labs_list))
    # Return a Pandas DataFrame
    elif format.lower() == "pandas" or format.lower() == "dataframe":
        output = labs_dict
        # Transform the dict into a Pandas DataFrame
        output = pd.DataFrame.from_dict(output)
        output = output.transpose()
        output = output.set_index(['name'])
    # Return an object
    elif format.lower() == "object" or format.lower() == "obj":
        output = labs
    # Default: return an object
    else:
        output = labs
    # Return a proper json
    if format.lower() == "json":
        output = json.dumps(labs_dict)
    return output
def labs_count(open_cage_api_key=None):
    """Gets the number of current Hackerspaces registered on hackerspaces.org.

    :param open_cage_api_key: OpenCage geocoder API key forwarded to
        get_labs(); defaulted so the old zero-argument call keeps working.
    :return: number of registered Hackerspaces
    """
    # BUGFIX: get_labs(format, open_cage_api_key) takes two required
    # arguments, so the previous bare get_labs() call always raised
    # TypeError. Request the object list and count it.
    labs = get_labs("object", open_cage_api_key)
    return len(labs)
if __name__ == "__main__":
    # No CLI behaviour: this module is meant to be imported, not run.
    pass
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/executables/gitlab_get_variables.py
|
_parse_args
|
python
|
def _parse_args(args: List[str]) -> ProjectRunConfig:
    """Parse the given CLI arguments into a run configuration.

    :param args: CLI arguments (excluding the program name)
    :return: run configuration derived from the given CLI arguments
    """
    parser = argparse.ArgumentParser(
        prog="gitlab-get-variables",
        description="Tool for getting a GitLab project's build variables")
    add_common_arguments(parser, project=True)
    parsed = parser.parse_args(args)
    return ProjectRunConfig(
        project=parsed.project, url=parsed.url, token=parsed.token,
        debug=parsed.debug)
|
Parses the given CLI arguments to get a run configuration.
:param args: CLI arguments
:return: run configuration derived from the given CLI arguments
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/executables/gitlab_get_variables.py#L11-L21
|
[
"def add_common_arguments(parser: ArgumentParser, project: bool=False):\n \"\"\"\n Adds common arguments to the given argument parser.\n :param parser: argument parser\n :param url: whether the URL named argument should be added\n :param token: whether the access token named argument should be added\n :param project: whether the project positional argument should be added\n \"\"\"\n parser.add_argument(\"--url\", type=str, help=\"Location of GitLab\")\n parser.add_argument(\"--token\", type=str, help=\"GitLab access token\")\n parser.add_argument(\"--debug\", action=\"store_true\", default=False, help=\"Turns on debugging\")\n if project:\n parser.add_argument(\"project\", type=str, help=\"The GitLab project to set the build variables for\")\n"
] |
import argparse
import json
import sys
from typing import List
from gitlabbuildvariables.common import GitLabConfig
from gitlabbuildvariables.executables._common import add_common_arguments, ProjectRunConfig
from gitlabbuildvariables.manager import ProjectVariablesManager
def main():
    """Entry point: fetch a GitLab project's build variables and print
    them as pretty-printed, key-sorted JSON."""
    config = _parse_args(sys.argv[1:])
    manager = ProjectVariablesManager(
        GitLabConfig(config.url, config.token), config.project)
    print(json.dumps(manager.get(), sort_keys=True, indent=4,
                     separators=(",", ": ")))
if __name__ == "__main__":
    # Script entry point.
    main()
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/executables/gitlab_get_variables.py
|
main
|
python
|
def main():
    """Main method.

    Parses CLI arguments, connects to GitLab, and prints the project's
    build variables as pretty-printed, key-sorted JSON.
    """
    run_config = _parse_args(sys.argv[1:])
    gitlab_config = GitLabConfig(run_config.url, run_config.token)
    manager = ProjectVariablesManager(gitlab_config, run_config.project)
    # sort_keys gives deterministic output; 4-space indent for readability.
    output = json.dumps(manager.get(), sort_keys=True, indent=4, separators=(",", ": "))
    print(output)
|
Main method.
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/executables/gitlab_get_variables.py#L24-L32
|
[
"def _parse_args(args: List[str]) -> ProjectRunConfig:\n \"\"\"\n Parses the given CLI arguments to get a run configuration.\n :param args: CLI arguments\n :return: run configuration derived from the given CLI arguments\n \"\"\"\n parser = argparse.ArgumentParser(prog=\"gitlab-get-variables\", description=\"Tool for getting a GitLab project's \"\n \"build variables\")\n add_common_arguments(parser, project=True)\n arguments = parser.parse_args(args)\n return ProjectRunConfig(project=arguments.project, url=arguments.url, token=arguments.token, debug=arguments.debug)\n",
"def get(self) -> Dict[str, str]:\n \"\"\"\n Gets the build variables for the project.\n :return: the build variables\n \"\"\"\n variables = self._project.variables.list(all=True)\n return {variable.key: variable.value for variable in variables}\n"
] |
import argparse
import json
import sys
from typing import List
from gitlabbuildvariables.common import GitLabConfig
from gitlabbuildvariables.executables._common import add_common_arguments, ProjectRunConfig
from gitlabbuildvariables.manager import ProjectVariablesManager
def _parse_args(args: List[str]) -> ProjectRunConfig:
    """
    Parses the given CLI arguments to get a run configuration.
    :param args: CLI arguments (excluding the program name)
    :return: run configuration derived from the given CLI arguments
    """
    parser = argparse.ArgumentParser(prog="gitlab-get-variables", description="Tool for getting a GitLab project's "
                                                                              "build variables")
    # project=True adds the positional "project" argument alongside the
    # common --url/--token/--debug options.
    add_common_arguments(parser, project=True)
    arguments = parser.parse_args(args)
    return ProjectRunConfig(project=arguments.project, url=arguments.url, token=arguments.token, debug=arguments.debug)
if __name__ == "__main__":
    # Script entry point.
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.